[llvm] [RISCV] Convert remaining constant splats in tests to splat shorthand. NFC (PR #87616)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 8 16:43:06 PDT 2024


https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/87616

>From 0c14ff01e75c02be8b406543b6083dec5adcb417 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Thu, 4 Apr 2024 17:33:29 +0800
Subject: [PATCH 1/2] [RISCV] Convert remaining constant splats in tests to
 splat shorthand. NFC

A handy shorthand for specifying the shufflevector(insertelement(poison, foo, 0), poison, zeroinitializer) splat pattern was introduced in #74620.
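For illustration (the functions below are made up for this description, not taken from any of the tests), here is the same <4 x i32> splat of 42 written both ways:

  define <4 x i32> @splat_expanded() {
    ; old form: build the splat from an insertelement + shufflevector sequence
    %head = insertelement <4 x i32> poison, i32 42, i32 0
    %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
    ret <4 x i32> %splat
  }

  define <4 x i32> @splat_shorthand() {
    ; new form: the splat is a constant and can be written inline where the vector type is known
    ret <4 x i32> splat (i32 42)
  }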

Some of the RISC-V tests were converted over to use this new form in dbb65dd330cc1696d7ca3dedc7aa9fa12c55a075; this patch handles the rest.

This converts some constant expressions to the new form, and it also turns instruction sequences that weren't previously constant expressions into constant expressions. In some cases this causes minor code motion in the test output, but nothing that matters to what the tests check. See e.g. llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-{gather,scatter}.ll
---
 llvm/test/CodeGen/RISCV/rvv/abs-vp.ll         |   96 +-
 llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll  |   96 +-
 llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll       |   68 +-
 llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll        |   68 +-
 llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll      |   16 +-
 llvm/test/CodeGen/RISCV/rvv/combine-sats.ll   |   44 +-
 llvm/test/CodeGen/RISCV/rvv/combine-splats.ll |   56 +-
 .../CodeGen/RISCV/rvv/constant-folding.ll     |    8 +-
 llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll        |  192 +-
 llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll       |   96 +-
 llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll        |  192 +-
 llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll  |   16 +-
 .../CodeGen/RISCV/rvv/extractelt-int-rv32.ll  |   20 +-
 .../CodeGen/RISCV/rvv/extractelt-int-rv64.ll  |   20 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll |   72 +-
 .../RISCV/rvv/fixed-vectors-bitreverse-vp.ll  |   72 +-
 .../RISCV/rvv/fixed-vectors-bswap-vp.ll       |   56 +-
 .../RISCV/rvv/fixed-vectors-ceil-vp.ll        |   56 +-
 .../RISCV/rvv/fixed-vectors-ctlz-vp.ll        |  144 +-
 .../RISCV/rvv/fixed-vectors-ctpop-vp.ll       |   72 +-
 .../RISCV/rvv/fixed-vectors-cttz-vp.ll        |  144 +-
 .../RISCV/rvv/fixed-vectors-floor-vp.ll       |   56 +-
 .../RISCV/rvv/fixed-vectors-fmaximum-vp.ll    |   52 +-
 .../RISCV/rvv/fixed-vectors-fminimum-vp.ll    |   52 +-
 .../RISCV/rvv/fixed-vectors-fp-splat.ll       |   48 +-
 .../RISCV/rvv/fixed-vectors-fpext-vp.ll       |    6 +-
 .../RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll |    6 +-
 .../RISCV/rvv/fixed-vectors-fptosi-vp.ll      |   26 +-
 .../RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll |    6 +-
 .../RISCV/rvv/fixed-vectors-fptoui-vp.ll      |   26 +-
 .../RISCV/rvv/fixed-vectors-fptrunc-vp.ll     |    6 +-
 .../RISCV/rvv/fixed-vectors-int-setcc.ll      |   44 +-
 .../RISCV/rvv/fixed-vectors-int-splat.ll      |   64 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-int.ll    |  208 +-
 .../RISCV/rvv/fixed-vectors-masked-gather.ll  | 2243 ++++-------------
 .../RISCV/rvv/fixed-vectors-masked-scatter.ll |  494 +---
 .../RISCV/rvv/fixed-vectors-nearbyint-vp.ll   |   56 +-
 .../rvv/fixed-vectors-peephole-vmerge-vops.ll |  108 +-
 .../RISCV/rvv/fixed-vectors-rint-vp.ll        |   56 +-
 .../RISCV/rvv/fixed-vectors-round-vp.ll       |   56 +-
 .../RISCV/rvv/fixed-vectors-roundeven-vp.ll   |   56 +-
 .../RISCV/rvv/fixed-vectors-roundtozero-vp.ll |   56 +-
 .../RISCV/rvv/fixed-vectors-setcc-int-vp.ll   |  216 +-
 .../RISCV/rvv/fixed-vectors-sext-vp-mask.ll   |    6 +-
 .../RISCV/rvv/fixed-vectors-sext-vp.ll        |   14 +-
 .../RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll |    6 +-
 .../RISCV/rvv/fixed-vectors-sitofp-vp.ll      |   26 +-
 .../RISCV/rvv/fixed-vectors-strided-vpload.ll |   36 +-
 .../rvv/fixed-vectors-strided-vpstore.ll      |   12 +-
 .../RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll |    6 +-
 .../RISCV/rvv/fixed-vectors-uitofp-vp.ll      |   26 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll |   32 +-
 .../RISCV/rvv/fixed-vectors-vadd-vp.ll        |  356 +--
 .../RISCV/rvv/fixed-vectors-vand-vp.ll        |  310 +--
 .../RISCV/rvv/fixed-vectors-vcopysign-vp.ll   |   56 +-
 .../RISCV/rvv/fixed-vectors-vdiv-vp.ll        |  128 +-
 .../RISCV/rvv/fixed-vectors-vdivu-vp.ll       |  128 +-
 .../RISCV/rvv/fixed-vectors-vfabs-vp.ll       |   56 +-
 .../RISCV/rvv/fixed-vectors-vfadd-vp.ll       |  100 +-
 .../RISCV/rvv/fixed-vectors-vfclass-vp.ll     |   44 +-
 .../RISCV/rvv/fixed-vectors-vfdiv-vp.ll       |   96 +-
 .../RISCV/rvv/fixed-vectors-vfma-vp.ll        |  104 +-
 .../RISCV/rvv/fixed-vectors-vfmacc-vp.ll      |  432 +---
 .../RISCV/rvv/fixed-vectors-vfmax-vp.ll       |   56 +-
 .../RISCV/rvv/fixed-vectors-vfmin-vp.ll       |   56 +-
 .../RISCV/rvv/fixed-vectors-vfmsac-vp.ll      |  624 ++---
 .../RISCV/rvv/fixed-vectors-vfmul-vp.ll       |   96 +-
 .../RISCV/rvv/fixed-vectors-vfmuladd-vp.ll    |  104 +-
 .../RISCV/rvv/fixed-vectors-vfneg-vp.ll       |   56 +-
 .../RISCV/rvv/fixed-vectors-vfnmacc-vp.ll     |  816 +++---
 .../RISCV/rvv/fixed-vectors-vfnmsac-vp.ll     |  624 ++---
 .../RISCV/rvv/fixed-vectors-vfrdiv-vp.ll      |   48 +-
 .../RISCV/rvv/fixed-vectors-vfrsub-vp.ll      |   48 +-
 .../RISCV/rvv/fixed-vectors-vfsqrt-vp.ll      |   56 +-
 .../RISCV/rvv/fixed-vectors-vfsub-vp.ll       |   96 +-
 .../RISCV/rvv/fixed-vectors-vmacc-vp.ll       |  720 ++----
 .../RISCV/rvv/fixed-vectors-vmax-vp.ll        |  152 +-
 .../RISCV/rvv/fixed-vectors-vmaxu-vp.ll       |  152 +-
 .../RISCV/rvv/fixed-vectors-vmin-vp.ll        |  152 +-
 .../RISCV/rvv/fixed-vectors-vminu-vp.ll       |  152 +-
 .../RISCV/rvv/fixed-vectors-vmul-vp.ll        |  136 +-
 .../RISCV/rvv/fixed-vectors-vnmsac-vp.ll      |  720 ++----
 .../CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll |  306 +--
 .../RISCV/rvv/fixed-vectors-vpgather.ll       |   32 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-vpload.ll |   32 +-
 .../RISCV/rvv/fixed-vectors-vpmerge.ll        |   72 +-
 .../RISCV/rvv/fixed-vectors-vpscatter.ll      |   32 +-
 .../RISCV/rvv/fixed-vectors-vpstore.ll        |    4 +-
 .../RISCV/rvv/fixed-vectors-vrem-vp.ll        |  128 +-
 .../RISCV/rvv/fixed-vectors-vremu-vp.ll       |  128 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-vror.ll   |   88 +-
 .../RISCV/rvv/fixed-vectors-vrsub-vp.ll       |  224 +-
 .../RISCV/rvv/fixed-vectors-vsadd-vp.ll       |  356 +--
 .../CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll  |   64 +-
 .../RISCV/rvv/fixed-vectors-vsaddu-vp.ll      |  356 +--
 .../CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll |   64 +-
 .../RISCV/rvv/fixed-vectors-vselect.ll        |   24 +-
 .../RISCV/rvv/fixed-vectors-vshl-vp.ll        |  288 +--
 .../RISCV/rvv/fixed-vectors-vsra-vp.ll        |  288 +--
 .../RISCV/rvv/fixed-vectors-vsrl-vp.ll        |  288 +--
 .../RISCV/rvv/fixed-vectors-vssub-vp.ll       |  356 +--
 .../CodeGen/RISCV/rvv/fixed-vectors-vssub.ll  |   64 +-
 .../RISCV/rvv/fixed-vectors-vssubu-vp.ll      |  356 +--
 .../CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll |   64 +-
 .../RISCV/rvv/fixed-vectors-vsub-vp.ll        |  136 +-
 .../RISCV/rvv/fixed-vectors-vxor-vp.ll        |  476 +---
 .../RISCV/rvv/fixed-vectors-zext-vp-mask.ll   |    6 +-
 .../RISCV/rvv/fixed-vectors-zext-vp.ll        |   14 +-
 llvm/test/CodeGen/RISCV/rvv/floor-vp.ll       |   68 +-
 llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll    |   64 +-
 llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll    |   64 +-
 .../RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll     |    4 +-
 .../test/CodeGen/RISCV/rvv/masked-load-int.ll |    4 +-
 .../CodeGen/RISCV/rvv/masked-store-int.ll     |    4 +-
 llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll |   28 +-
 .../test/CodeGen/RISCV/rvv/mscatter-sdnode.ll |   28 +-
 .../CodeGen/RISCV/rvv/narrow-shift-extend.ll  |   56 +-
 llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll   |   72 +-
 llvm/test/CodeGen/RISCV/rvv/rint-vp.ll        |   72 +-
 llvm/test/CodeGen/RISCV/rvv/round-vp.ll       |   72 +-
 llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll   |   72 +-
 llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll |   72 +-
 .../rvv/rvv-peephole-vmerge-masked-vops.ll    |   68 +-
 .../RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll |    8 +-
 .../RISCV/rvv/rvv-peephole-vmerge-vops.ll     |  130 +-
 .../CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll    |   28 +-
 llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll   |  432 +---
 llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll  |  502 +---
 .../CodeGen/RISCV/rvv/sink-splat-operands.ll  |    4 +-
 llvm/test/CodeGen/RISCV/rvv/stepvector.ll     |   48 +-
 .../CodeGen/RISCV/rvv/strided-load-store.ll   |   12 +-
 llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll |   44 +-
 .../test/CodeGen/RISCV/rvv/strided-vpstore.ll |   12 +-
 llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll |   32 +-
 llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll  |  118 +-
 llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll    |  190 +-
 llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll        |  442 +---
 llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll    |  278 +-
 llvm/test/CodeGen/RISCV/rvv/vand-vp.ll        |  414 +--
 llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll   |  176 +-
 llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll   |   64 +-
 llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll    |  110 +-
 llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll        |  176 +-
 llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll   |  142 +-
 llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll       |  176 +-
 llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll       |   72 +-
 llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll   |    8 +-
 llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll       |  132 +-
 llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll     |   32 +-
 llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll   |    8 +-
 llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll       |  128 +-
 .../test/CodeGen/RISCV/rvv/vfma-vp-combine.ll |   16 +-
 llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll        | 1778 +++++--------
 llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll      |  540 ++--
 llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll       |   64 +-
 llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll       |   64 +-
 llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll      |  780 ++----
 llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll   |    8 +-
 llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll       |  128 +-
 llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll    | 1778 +++++--------
 llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll       |   72 +-
 llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll     | 1020 +++-----
 llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll     |  780 ++----
 llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll      |   60 +-
 llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll      |   60 +-
 llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll      |   72 +-
 llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll   |    8 +-
 llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll       |  128 +-
 llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll     |  226 +-
 llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll     |  110 +-
 llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll    |  264 +-
 llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll    |  180 +-
 llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll       |  880 +++----
 llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll       |  880 +++----
 llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll |  100 +-
 llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll    |  132 +-
 llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll        |  192 +-
 llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll   |  136 +-
 llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll       |  192 +-
 llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll    |  132 +-
 llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll        |  192 +-
 llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll   |  146 +-
 llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll       |  192 +-
 llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll    |  134 +-
 llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll        |  184 +-
 llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll   |  100 +-
 llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll  |   96 +-
 llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll      |  880 +++----
 llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll   |   32 +-
 llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll       |   28 +-
 llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll   |   32 +-
 llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll       |   28 +-
 llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll     |  272 +-
 llvm/test/CodeGen/RISCV/rvv/vor-vp.ll         |  418 +--
 .../rvv/vp-reverse-float-fixed-vectors.ll     |    8 +-
 .../CodeGen/RISCV/rvv/vp-reverse-float.ll     |   32 +-
 .../RISCV/rvv/vp-reverse-int-fixed-vectors.ll |   16 +-
 llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll |   68 +-
 .../rvv/vp-reverse-mask-fixed-vectors.ll      |   16 +-
 .../test/CodeGen/RISCV/rvv/vp-reverse-mask.ll |   28 +-
 .../RISCV/rvv/vp-splice-fixed-vectors.ll      |   48 +-
 .../RISCV/rvv/vp-splice-mask-fixed-vectors.ll |   32 +-
 .../RISCV/rvv/vp-splice-mask-vectors.ll       |   56 +-
 llvm/test/CodeGen/RISCV/rvv/vp-splice.ll      |   70 +-
 .../test/CodeGen/RISCV/rvv/vpgather-sdnode.ll |   28 +-
 llvm/test/CodeGen/RISCV/rvv/vpload.ll         |   32 +-
 llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll |  100 +-
 .../CodeGen/RISCV/rvv/vpscatter-sdnode.ll     |   28 +-
 llvm/test/CodeGen/RISCV/rvv/vpstore.ll        |    4 +-
 llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll    |   88 +-
 llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll        |  176 +-
 llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll   |  120 +-
 llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll       |  176 +-
 llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll   |   88 +-
 llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll       |  308 +--
 llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll   |   88 +-
 llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll       |  434 +---
 llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll  |   88 +-
 llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll      |  434 +---
 llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll     |   16 +-
 llvm/test/CodeGen/RISCV/rvv/vselect-int.ll    |   96 +-
 llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll    |  124 +-
 llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll        |  396 +--
 llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll     |   12 +-
 llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll     |   40 +-
 llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll    |   40 +-
 llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll    |   12 +-
 llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll    |  108 +-
 llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll        |  396 +--
 llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll    |  108 +-
 llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll        |  396 +--
 llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll   |   88 +-
 llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll       |  434 +---
 llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll  |   88 +-
 llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll      |  434 +---
 llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll    |   98 +-
 llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll        |  184 +-
 llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll   |    2 +-
 llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll      |   12 +-
 llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll    |   24 +-
 llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll     |   12 +-
 llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll    |  268 +-
 llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll        |  644 +----
 243 files changed, 11558 insertions(+), 29849 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index 8898ce509ecb7a..eb74e2d302f1a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -24,9 +24,7 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -50,9 +48,7 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -76,9 +72,7 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -102,9 +96,7 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -128,9 +120,7 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -154,9 +144,7 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -180,9 +168,7 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -206,9 +192,7 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -232,9 +216,7 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -258,9 +240,7 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -284,9 +264,7 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -310,9 +288,7 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16_unmasked(<vscale x 16 x i16> %va, i3
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -336,9 +312,7 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16_unmasked(<vscale x 32 x i16> %va, i3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -362,9 +336,7 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -388,9 +360,7 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -414,9 +384,7 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -440,9 +408,7 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -466,9 +432,7 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32_unmasked(<vscale x 16 x i32> %va, i3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -492,9 +456,7 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -518,9 +480,7 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -544,9 +504,7 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -570,9 +528,7 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
 
@@ -596,9 +552,7 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -675,8 +629,6 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64_unmasked(<vscale x 16 x i64> %va, i3
 ; CHECK-NEXT:    vrsub.vi v24, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 66eab2f6536224..879dff4a6e490b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -70,9 +70,7 @@ define <vscale x 1 x i8> @vp_bitreverse_nxv1i8_unmasked(<vscale x 1 x i8> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.bitreverse.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.bitreverse.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -138,9 +136,7 @@ define <vscale x 2 x i8> @vp_bitreverse_nxv2i8_unmasked(<vscale x 2 x i8> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.bitreverse.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.bitreverse.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -206,9 +202,7 @@ define <vscale x 4 x i8> @vp_bitreverse_nxv4i8_unmasked(<vscale x 4 x i8> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.bitreverse.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.bitreverse.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -274,9 +268,7 @@ define <vscale x 8 x i8> @vp_bitreverse_nxv8i8_unmasked(<vscale x 8 x i8> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.bitreverse.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.bitreverse.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -342,9 +334,7 @@ define <vscale x 16 x i8> @vp_bitreverse_nxv16i8_unmasked(<vscale x 16 x i8> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.bitreverse.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.bitreverse.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -410,9 +400,7 @@ define <vscale x 32 x i8> @vp_bitreverse_nxv32i8_unmasked(<vscale x 32 x i8> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.bitreverse.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.bitreverse.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -478,9 +466,7 @@ define <vscale x 64 x i8> @vp_bitreverse_nxv64i8_unmasked(<vscale x 64 x i8> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.bitreverse.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.bitreverse.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -560,9 +546,7 @@ define <vscale x 1 x i16> @vp_bitreverse_nxv1i16_unmasked(<vscale x 1 x i16> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.bitreverse.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.bitreverse.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -642,9 +626,7 @@ define <vscale x 2 x i16> @vp_bitreverse_nxv2i16_unmasked(<vscale x 2 x i16> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.bitreverse.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.bitreverse.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -724,9 +706,7 @@ define <vscale x 4 x i16> @vp_bitreverse_nxv4i16_unmasked(<vscale x 4 x i16> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.bitreverse.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.bitreverse.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -806,9 +786,7 @@ define <vscale x 8 x i16> @vp_bitreverse_nxv8i16_unmasked(<vscale x 8 x i16> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.bitreverse.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.bitreverse.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -888,9 +866,7 @@ define <vscale x 16 x i16> @vp_bitreverse_nxv16i16_unmasked(<vscale x 16 x i16>
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.bitreverse.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.bitreverse.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -970,9 +946,7 @@ define <vscale x 32 x i16> @vp_bitreverse_nxv32i16_unmasked(<vscale x 32 x i16>
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.bitreverse.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.bitreverse.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1068,9 +1042,7 @@ define <vscale x 1 x i32> @vp_bitreverse_nxv1i32_unmasked(<vscale x 1 x i32> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.bitreverse.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.bitreverse.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1166,9 +1138,7 @@ define <vscale x 2 x i32> @vp_bitreverse_nxv2i32_unmasked(<vscale x 2 x i32> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.bitreverse.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.bitreverse.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1264,9 +1234,7 @@ define <vscale x 4 x i32> @vp_bitreverse_nxv4i32_unmasked(<vscale x 4 x i32> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.bitreverse.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.bitreverse.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1362,9 +1330,7 @@ define <vscale x 8 x i32> @vp_bitreverse_nxv8i32_unmasked(<vscale x 8 x i32> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.bitreverse.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.bitreverse.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1460,9 +1426,7 @@ define <vscale x 16 x i32> @vp_bitreverse_nxv16i32_unmasked(<vscale x 16 x i32>
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.bitreverse.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.bitreverse.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1746,9 +1710,7 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.bitreverse.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.bitreverse.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -2032,9 +1994,7 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.bitreverse.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.bitreverse.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -2318,9 +2278,7 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.bitreverse.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.bitreverse.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2678,9 +2636,7 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i64> @llvm.vp.bitreverse.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i64> @llvm.vp.bitreverse.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
 
@@ -3038,9 +2994,7 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.bitreverse.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.bitreverse.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -3237,9 +3191,7 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16_unmasked(<vscale x 64 x i16>
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i16> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 800dc7ec388591..800d06c5a78f54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -42,9 +42,7 @@ define <vscale x 1 x i16> @vp_bswap_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.bswap.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.bswap.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -82,9 +80,7 @@ define <vscale x 2 x i16> @vp_bswap_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.bswap.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.bswap.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -122,9 +118,7 @@ define <vscale x 4 x i16> @vp_bswap_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.bswap.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.bswap.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -162,9 +156,7 @@ define <vscale x 8 x i16> @vp_bswap_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.bswap.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.bswap.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -202,9 +194,7 @@ define <vscale x 16 x i16> @vp_bswap_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.bswap.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.bswap.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -242,9 +232,7 @@ define <vscale x 32 x i16> @vp_bswap_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.bswap.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.bswap.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -298,9 +286,7 @@ define <vscale x 1 x i32> @vp_bswap_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.bswap.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.bswap.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -354,9 +340,7 @@ define <vscale x 2 x i32> @vp_bswap_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.bswap.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.bswap.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -410,9 +394,7 @@ define <vscale x 4 x i32> @vp_bswap_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.bswap.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.bswap.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -466,9 +448,7 @@ define <vscale x 8 x i32> @vp_bswap_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.bswap.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.bswap.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -522,9 +502,7 @@ define <vscale x 16 x i32> @vp_bswap_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.bswap.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.bswap.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -694,9 +672,7 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.bswap.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.bswap.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -866,9 +842,7 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.bswap.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.bswap.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1038,9 +1012,7 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.bswap.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.bswap.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1283,9 +1255,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i64> @llvm.vp.bswap.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i64> @llvm.vp.bswap.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
 
@@ -1528,9 +1498,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.bswap.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.bswap.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1655,9 +1623,7 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16_unmasked(<vscale x 64 x i16> %va,
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vrev8.v v8, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i16> @llvm.vp.bswap.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i16> @llvm.vp.bswap.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i16> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index edc348ebc68ff3..5b271606f08abe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -42,9 +42,7 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -86,9 +84,7 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -130,9 +126,7 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -176,9 +170,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -222,9 +214,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -268,9 +258,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -312,9 +300,7 @@ define <vscale x 1 x float> @vp_ceil_vv_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.ceil.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.ceil.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -356,9 +342,7 @@ define <vscale x 2 x float> @vp_ceil_vv_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.ceil.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.ceil.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -402,9 +386,7 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.ceil.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.ceil.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -448,9 +430,7 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.ceil.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.ceil.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -494,9 +474,7 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.ceil.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.ceil.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -538,9 +516,7 @@ define <vscale x 1 x double> @vp_ceil_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.ceil.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.ceil.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -584,9 +560,7 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -630,9 +604,7 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -676,9 +648,7 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64_unmasked(<vscale x 7 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -722,9 +692,7 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -837,8 +805,6 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64_unmasked(<vscale x 16 x doubl
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll b/llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll
index 7831512c658e7c..4c40b7c74451d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll
@@ -9,9 +9,7 @@ define <vscale x 8 x i1> @not_icmp_sle_nxv8i16(<vscale x 8 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
 ; CHECK-NEXT:    ret
   %icmp = icmp sle <vscale x 8 x i16> %a, %b
-  %tmp = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %ones = shufflevector <vscale x 8 x i1> %tmp, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %not = xor <vscale x 8 x i1> %ones, %icmp
+  %not = xor <vscale x 8 x i1> splat (i1 true), %icmp
   ret <vscale x 8 x i1> %not
 }
 
@@ -22,9 +20,7 @@ define <vscale x 4 x i1> @not_icmp_sgt_nxv4i32(<vscale x 4 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
 ; CHECK-NEXT:    ret
   %icmp = icmp sgt <vscale x 4 x i32> %a, %b
-  %tmp = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %ones = shufflevector <vscale x 4 x i1> %tmp, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %icmp, %ones
+  %not = xor <vscale x 4 x i1> %icmp, splat (i1 true)
   ret <vscale x 4 x i1> %not
 }
 
@@ -35,9 +31,7 @@ define <vscale x 2 x i1> @not_fcmp_une_nxv2f64(<vscale x 2 x double> %a, <vscale
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
 ; CHECK-NEXT:    ret
   %icmp = fcmp une <vscale x 2 x double> %a, %b
-  %tmp = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %ones = shufflevector <vscale x 2 x i1> %tmp, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %not = xor <vscale x 2 x i1> %icmp, %ones
+  %not = xor <vscale x 2 x i1> %icmp, splat (i1 true)
   ret <vscale x 2 x i1> %not
 }
 
@@ -48,8 +42,6 @@ define <vscale x 4 x i1> @not_fcmp_uge_nxv4f32(<vscale x 4 x float> %a, <vscale
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
 ; CHECK-NEXT:    ret
   %icmp = fcmp uge <vscale x 4 x float> %a, %b
-  %tmp = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %ones = shufflevector <vscale x 4 x i1> %tmp, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %icmp, %ones
+  %not = xor <vscale x 4 x i1> %icmp, splat (i1 true)
   ret <vscale x 4 x i1> %not
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll
index 46bedcd4e96662..8f917becafec01 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll
@@ -25,12 +25,8 @@ define <vscale x 2 x i64> @add_umax_nxv2i64(<vscale x 2 x i64> %a0) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %ins1 = insertelement <vscale x 2 x i64> poison, i64 7, i32 0
-  %splat1 = shufflevector <vscale x 2 x i64> %ins1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ins2 = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %ins2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v1 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> %splat1)
-  %v2 = add <vscale x 2 x i64> %v1, %splat2
+  %v1 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> %a0, <vscale x 2 x i64> splat (i64 7))
+  %v2 = add <vscale x 2 x i64> %v1, splat (i64 -7)
   ret <vscale x 2 x i64> %v2
 }
 
@@ -162,12 +158,8 @@ define <vscale x 2 x i64> @vselect_add_const_nxv2i64(<vscale x 2 x i64> %a0) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %cm1 = insertelement <vscale x 2 x i64> poison, i64 -6, i32 0
-  %splatcm1 = shufflevector <vscale x 2 x i64> %cm1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %nc = insertelement <vscale x 2 x i64> poison, i64 5, i32 0
-  %splatnc = shufflevector <vscale x 2 x i64> %nc, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v1 = add <vscale x 2 x i64> %a0, %splatcm1
-  %cmp = icmp ugt <vscale x 2 x i64> %a0, %splatnc
+  %v1 = add <vscale x 2 x i64> %a0, splat (i64 -6)
+  %cmp = icmp ugt <vscale x 2 x i64> %a0, splat (i64 5)
   %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> zeroinitializer
   ret <vscale x 2 x i64> %v2
 }
@@ -194,12 +186,8 @@ define <vscale x 2 x i16> @vselect_add_const_signbit_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %cm1 = insertelement <vscale x 2 x i16> poison, i16 32766, i32 0
-  %splatcm1 = shufflevector <vscale x 2 x i16> %cm1, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %nc = insertelement <vscale x 2 x i16> poison, i16 -32767, i32 0
-  %splatnc = shufflevector <vscale x 2 x i16> %nc, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 2 x i16> %a0, %splatcm1
-  %v1 = add <vscale x 2 x i16> %a0, %splatnc
+  %cmp = icmp ugt <vscale x 2 x i16> %a0, splat (i16 32766)
+  %v1 = add <vscale x 2 x i16> %a0, splat (i16 -32767)
   %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %v1, <vscale x 2 x i16> zeroinitializer
   ret <vscale x 2 x i16> %v2
 }
@@ -227,9 +215,7 @@ define <vscale x 2 x i16> @vselect_xor_const_signbit_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
   %cmp = icmp slt <vscale x 2 x i16> %a0, zeroinitializer
-  %ins = insertelement <vscale x 2 x i16> poison, i16 -32768, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v1 = xor <vscale x 2 x i16> %a0, %splat
+  %v1 = xor <vscale x 2 x i16> %a0, splat (i16 -32768)
   %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %v1, <vscale x 2 x i16> zeroinitializer
   ret <vscale x 2 x i16> %v2
 }
@@ -259,9 +245,7 @@ define <vscale x 2 x i64> @vselect_add_nxv2i64(<vscale x 2 x i64> %a0, <vscale x
 ; CHECK-NEXT:    ret
   %v1 = add <vscale x 2 x i64> %a0, %a1
   %cmp = icmp ule <vscale x 2 x i64> %a0, %v1
-  %allones = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %splatallones = shufflevector <vscale x 2 x i64> %allones, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %splatallones
+  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> splat (i64 -1)
   ret <vscale x 2 x i64> %v2
 }
 
@@ -286,15 +270,9 @@ define <vscale x 2 x i64> @vselect_add_const_2_nxv2i64(<vscale x 2 x i64> %a0) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %cm1 = insertelement <vscale x 2 x i64> poison, i64 6, i32 0
-  %splatcm1 = shufflevector <vscale x 2 x i64> %cm1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %nc = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0
-  %splatnc = shufflevector <vscale x 2 x i64> %nc, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v1 = add <vscale x 2 x i64> %a0, %splatcm1
-  %cmp = icmp ule <vscale x 2 x i64> %a0, %splatnc
-  %allones = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %splatallones = shufflevector <vscale x 2 x i64> %allones, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %splatallones
+  %v1 = add <vscale x 2 x i64> %a0, splat (i64 6)
+  %cmp = icmp ule <vscale x 2 x i64> %a0, splat (i64 -7)
+  %v2 = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %v1, <vscale x 2 x i64> splat (i64 -1)
   ret <vscale x 2 x i64> %v2
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
index 80b6c8ced3a459..5a67d0fbebf31c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
@@ -10,12 +10,8 @@ define <vscale x 4 x i32> @and_or_nxv4i32(<vscale x 4 x i32> %A) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 8
 ; CHECK-NEXT:    ret
-  %ins1 = insertelement <vscale x 4 x i32> poison, i32 255, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %ins1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %ins2 = insertelement <vscale x 4 x i32> poison, i32 8, i32 0
-  %splat2 = shufflevector <vscale x 4 x i32> %ins2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v1 = or <vscale x 4 x i32> %A, %splat1
-  %v2 = and <vscale x 4 x i32> %v1, %splat2
+  %v1 = or <vscale x 4 x i32> %A, splat (i32 255)
+  %v2 = and <vscale x 4 x i32> %v1, splat (i32 8)
   ret <vscale x 4 x i32> %v2
 }
 
@@ -28,12 +24,8 @@ define <vscale x 2 x i64> @or_and_nxv2i64(<vscale x 2 x i64> %a0) {
 ; CHECK-NEXT:    vor.vi v8, v8, 3
 ; CHECK-NEXT:    vand.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %ins1 = insertelement <vscale x 2 x i64> poison, i64 7, i32 0
-  %splat1 = shufflevector <vscale x 2 x i64> %ins1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ins2 = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %ins2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v1 = and <vscale x 2 x i64> %a0, %splat1
-  %v2 = or <vscale x 2 x i64> %v1, %splat2
+  %v1 = and <vscale x 2 x i64> %a0, splat (i64 7)
+  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
   ret <vscale x 2 x i64> %v2
 }
 
@@ -45,12 +37,8 @@ define <vscale x 2 x i64> @or_and_nxv2i64_fold(<vscale x 2 x i64> %a0) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
-  %ins1 = insertelement <vscale x 2 x i64> poison, i64 1, i32 0
-  %splat1 = shufflevector <vscale x 2 x i64> %ins1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ins2 = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %ins2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v1 = and <vscale x 2 x i64> %a0, %splat1
-  %v2 = or <vscale x 2 x i64> %v1, %splat2
+  %v1 = and <vscale x 2 x i64> %a0, splat (i64 1)
+  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
   ret <vscale x 2 x i64> %v2
 }
 
@@ -62,12 +50,8 @@ define <vscale x 4 x i32> @combine_vec_shl_shl(<vscale x 4 x i32> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %ins1 = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %ins1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %ins2 = insertelement <vscale x 4 x i32> poison, i32 4, i32 0
-  %splat2 = shufflevector <vscale x 4 x i32> %ins2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v1 = shl <vscale x 4 x i32> %x, %splat1
-  %v2 = shl <vscale x 4 x i32> %v1, %splat2
+  %v1 = shl <vscale x 4 x i32> %x, splat (i32 2)
+  %v2 = shl <vscale x 4 x i32> %v1, splat (i32 4)
   ret <vscale x 4 x i32> %v2
 }
 
@@ -79,12 +63,8 @@ define <vscale x 2 x i32> @combine_vec_ashr_ashr(<vscale x 2 x i32> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %ins1 = insertelement <vscale x 2 x i32> poison, i32 2, i32 0
-  %splat1 = shufflevector <vscale x 2 x i32> %ins1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %ins2 = insertelement <vscale x 2 x i32> poison, i32 4, i32 0
-  %splat2 = shufflevector <vscale x 2 x i32> %ins2, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v1 = ashr <vscale x 2 x i32> %x, %splat1
-  %v2 = ashr <vscale x 2 x i32> %v1, %splat2
+  %v1 = ashr <vscale x 2 x i32> %x, splat (i32 2)
+  %v2 = ashr <vscale x 2 x i32> %v1, splat (i32 4)
   ret <vscale x 2 x i32> %v2
 }
 
@@ -96,12 +76,8 @@ define <vscale x 8 x i16> @combine_vec_lshr_lshr(<vscale x 8 x i16> %x) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %ins1 = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat1 = shufflevector <vscale x 8 x i16> %ins1, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %ins2 = insertelement <vscale x 8 x i16> poison, i16 4, i32 0
-  %splat2 = shufflevector <vscale x 8 x i16> %ins2, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v1 = lshr <vscale x 8 x i16> %x, %splat2
-  %v2 = lshr <vscale x 8 x i16> %v1, %splat2
+  %v1 = lshr <vscale x 8 x i16> %x, splat (i16 4)
+  %v2 = lshr <vscale x 8 x i16> %v1, splat (i16 4)
   ret <vscale x 8 x i16> %v2
 }
 
@@ -110,9 +86,7 @@ define <vscale x 2 x float> @combine_fmul_one(<vscale x 2 x float> %x) {
 ; CHECK-LABEL: combine_fmul_one:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x float> poison, float 1.0, i32 0
-  %splat = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %v = fmul <vscale x 2 x float> %x, %splat
+  %v = fmul <vscale x 2 x float> %x, splat (float 1.0)
   ret <vscale x 2 x float> %v
 }
 
@@ -121,8 +95,6 @@ define <vscale x 2 x float> @combine_fmul_one_commuted(<vscale x 2 x float> %x)
 ; CHECK-LABEL: combine_fmul_one_commuted:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  %ins = insertelement <vscale x 2 x float> poison, float 1.0, i32 0
-  %splat = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %v = fmul <vscale x 2 x float> %splat, %x
+  %v = fmul <vscale x 2 x float> splat (float 1.0), %x
   ret <vscale x 2 x float> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
index b3f561a52f4114..05747ff0d049a1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -21,9 +21,7 @@ define <2 x i16> @fixedlen(<2 x i32> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 0
 ; CHECK-NEXT:    ret
-  %v41 = insertelement <2 x i32> poison, i32 16, i32 0
-  %v42 = shufflevector <2 x i32> %v41, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v43 = lshr <2 x i32> %x, %v42
+  %v43 = lshr <2 x i32> %x, splat (i32 16)
   %v44 = trunc <2 x i32> %v43 to <2 x i16>
   %v45 =  insertelement <2 x i32> poison, i32 -32768, i32 0
   %v46 = shufflevector <2 x i32> %v45, <2 x i32> poison, <2 x i32> zeroinitializer
@@ -40,9 +38,7 @@ define <vscale x 2 x i16> @scalable(<vscale x 2 x i32> %x) {
 ; CHECK-NEXT:    lui a0, 1048568
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %v41 = insertelement <vscale x 2 x i32> poison, i32 16, i32 0
-  %v42 = shufflevector <vscale x 2 x i32> %v41, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v43 = lshr <vscale x 2 x i32> %x, %v42
+  %v43 = lshr <vscale x 2 x i32> %x, splat (i32 16)
   %v44 = trunc <vscale x 2 x i32> %v43 to <vscale x 2 x i16>
   %v45 =  insertelement <vscale x 2 x i32> poison, i32 -32768, i32 0
   %v46 = shufflevector <vscale x 2 x i32> %v45, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index e4f030a642f7d1..2a75e5ce7175d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -57,9 +57,7 @@ define <vscale x 1 x i8> @vp_ctlz_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zer
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ctlz.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ctlz.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -112,9 +110,7 @@ define <vscale x 2 x i8> @vp_ctlz_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ctlz.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ctlz.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -167,9 +163,7 @@ define <vscale x 4 x i8> @vp_ctlz_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ctlz.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ctlz.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -222,9 +216,7 @@ define <vscale x 8 x i8> @vp_ctlz_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zer
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ctlz.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ctlz.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -277,9 +269,7 @@ define <vscale x 16 x i8> @vp_ctlz_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ctlz.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ctlz.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -349,9 +339,7 @@ define <vscale x 32 x i8> @vp_ctlz_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ctlz.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ctlz.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -421,9 +409,7 @@ define <vscale x 64 x i8> @vp_ctlz_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ctlz.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ctlz.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -470,9 +456,7 @@ define <vscale x 1 x i16> @vp_ctlz_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ctlz.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ctlz.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -519,9 +503,7 @@ define <vscale x 2 x i16> @vp_ctlz_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ctlz.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ctlz.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -568,9 +550,7 @@ define <vscale x 4 x i16> @vp_ctlz_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ctlz.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ctlz.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -617,9 +597,7 @@ define <vscale x 8 x i16> @vp_ctlz_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ctlz.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ctlz.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -666,9 +644,7 @@ define <vscale x 16 x i16> @vp_ctlz_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ctlz.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ctlz.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -756,9 +732,7 @@ define <vscale x 32 x i16> @vp_ctlz_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ctlz.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ctlz.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -807,9 +781,7 @@ define <vscale x 1 x i32> @vp_ctlz_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ctlz.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ctlz.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -858,9 +830,7 @@ define <vscale x 2 x i32> @vp_ctlz_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ctlz.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ctlz.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -909,9 +879,7 @@ define <vscale x 4 x i32> @vp_ctlz_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ctlz.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ctlz.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -960,9 +928,7 @@ define <vscale x 8 x i32> @vp_ctlz_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ctlz.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ctlz.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1010,9 +976,7 @@ define <vscale x 16 x i32> @vp_ctlz_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ctlz.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ctlz.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1062,9 +1026,7 @@ define <vscale x 1 x i64> @vp_ctlz_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ctlz.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ctlz.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1114,9 +1076,7 @@ define <vscale x 2 x i64> @vp_ctlz_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ctlz.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ctlz.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1166,9 +1126,7 @@ define <vscale x 4 x i64> @vp_ctlz_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ctlz.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ctlz.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1218,9 +1176,7 @@ define <vscale x 7 x i64> @vp_ctlz_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i64> @llvm.vp.ctlz.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i64> @llvm.vp.ctlz.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
 
@@ -1270,9 +1226,7 @@ define <vscale x 8 x i64> @vp_ctlz_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ctlz.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ctlz.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1386,9 +1340,7 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }
 
@@ -1435,9 +1387,7 @@ define <vscale x 1 x i8> @vp_ctlz_zero_undef_nxv1i8_unmasked(<vscale x 1 x i8> %
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ctlz.nxv1i8(<vscale x 1 x i8> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ctlz.nxv1i8(<vscale x 1 x i8> %va, i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -1485,9 +1435,7 @@ define <vscale x 2 x i8> @vp_ctlz_zero_undef_nxv2i8_unmasked(<vscale x 2 x i8> %
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ctlz.nxv2i8(<vscale x 2 x i8> %va, i1 true, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ctlz.nxv2i8(<vscale x 2 x i8> %va, i1 true, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -1535,9 +1483,7 @@ define <vscale x 4 x i8> @vp_ctlz_zero_undef_nxv4i8_unmasked(<vscale x 4 x i8> %
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ctlz.nxv4i8(<vscale x 4 x i8> %va, i1 true, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ctlz.nxv4i8(<vscale x 4 x i8> %va, i1 true, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -1585,9 +1531,7 @@ define <vscale x 8 x i8> @vp_ctlz_zero_undef_nxv8i8_unmasked(<vscale x 8 x i8> %
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ctlz.nxv8i8(<vscale x 8 x i8> %va, i1 true, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ctlz.nxv8i8(<vscale x 8 x i8> %va, i1 true, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -1635,9 +1579,7 @@ define <vscale x 16 x i8> @vp_ctlz_zero_undef_nxv16i8_unmasked(<vscale x 16 x i8
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ctlz.nxv16i8(<vscale x 16 x i8> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ctlz.nxv16i8(<vscale x 16 x i8> %va, i1 true, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -1706,9 +1648,7 @@ define <vscale x 32 x i8> @vp_ctlz_zero_undef_nxv32i8_unmasked(<vscale x 32 x i8
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ctlz.nxv32i8(<vscale x 32 x i8> %va, i1 true, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ctlz.nxv32i8(<vscale x 32 x i8> %va, i1 true, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -1777,9 +1717,7 @@ define <vscale x 64 x i8> @vp_ctlz_zero_undef_nxv64i8_unmasked(<vscale x 64 x i8
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ctlz.nxv64i8(<vscale x 64 x i8> %va, i1 true, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ctlz.nxv64i8(<vscale x 64 x i8> %va, i1 true, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -1821,9 +1759,7 @@ define <vscale x 1 x i16> @vp_ctlz_zero_undef_nxv1i16_unmasked(<vscale x 1 x i16
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ctlz.nxv1i16(<vscale x 1 x i16> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ctlz.nxv1i16(<vscale x 1 x i16> %va, i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -1865,9 +1801,7 @@ define <vscale x 2 x i16> @vp_ctlz_zero_undef_nxv2i16_unmasked(<vscale x 2 x i16
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ctlz.nxv2i16(<vscale x 2 x i16> %va, i1 true, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ctlz.nxv2i16(<vscale x 2 x i16> %va, i1 true, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -1909,9 +1843,7 @@ define <vscale x 4 x i16> @vp_ctlz_zero_undef_nxv4i16_unmasked(<vscale x 4 x i16
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ctlz.nxv4i16(<vscale x 4 x i16> %va, i1 true, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ctlz.nxv4i16(<vscale x 4 x i16> %va, i1 true, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -1953,9 +1885,7 @@ define <vscale x 8 x i16> @vp_ctlz_zero_undef_nxv8i16_unmasked(<vscale x 8 x i16
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ctlz.nxv8i16(<vscale x 8 x i16> %va, i1 true, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ctlz.nxv8i16(<vscale x 8 x i16> %va, i1 true, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1997,9 +1927,7 @@ define <vscale x 16 x i16> @vp_ctlz_zero_undef_nxv16i16_unmasked(<vscale x 16 x
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ctlz.nxv16i16(<vscale x 16 x i16> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ctlz.nxv16i16(<vscale x 16 x i16> %va, i1 true, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -2086,9 +2014,7 @@ define <vscale x 32 x i16> @vp_ctlz_zero_undef_nxv32i16_unmasked(<vscale x 32 x
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ctlz.nxv32i16(<vscale x 32 x i16> %va, i1 true, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ctlz.nxv32i16(<vscale x 32 x i16> %va, i1 true, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -2132,9 +2058,7 @@ define <vscale x 1 x i32> @vp_ctlz_zero_undef_nxv1i32_unmasked(<vscale x 1 x i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ctlz.nxv1i32(<vscale x 1 x i32> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ctlz.nxv1i32(<vscale x 1 x i32> %va, i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -2178,9 +2102,7 @@ define <vscale x 2 x i32> @vp_ctlz_zero_undef_nxv2i32_unmasked(<vscale x 2 x i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ctlz.nxv2i32(<vscale x 2 x i32> %va, i1 true, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ctlz.nxv2i32(<vscale x 2 x i32> %va, i1 true, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -2224,9 +2146,7 @@ define <vscale x 4 x i32> @vp_ctlz_zero_undef_nxv4i32_unmasked(<vscale x 4 x i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ctlz.nxv4i32(<vscale x 4 x i32> %va, i1 true, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ctlz.nxv4i32(<vscale x 4 x i32> %va, i1 true, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -2270,9 +2190,7 @@ define <vscale x 8 x i32> @vp_ctlz_zero_undef_nxv8i32_unmasked(<vscale x 8 x i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ctlz.nxv8i32(<vscale x 8 x i32> %va, i1 true, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ctlz.nxv8i32(<vscale x 8 x i32> %va, i1 true, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -2315,9 +2233,7 @@ define <vscale x 16 x i32> @vp_ctlz_zero_undef_nxv16i32_unmasked(<vscale x 16 x
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ctlz.nxv16i32(<vscale x 16 x i32> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ctlz.nxv16i32(<vscale x 16 x i32> %va, i1 true, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -2362,9 +2278,7 @@ define <vscale x 1 x i64> @vp_ctlz_zero_undef_nxv1i64_unmasked(<vscale x 1 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ctlz.nxv1i64(<vscale x 1 x i64> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ctlz.nxv1i64(<vscale x 1 x i64> %va, i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -2409,9 +2323,7 @@ define <vscale x 2 x i64> @vp_ctlz_zero_undef_nxv2i64_unmasked(<vscale x 2 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ctlz.nxv2i64(<vscale x 2 x i64> %va, i1 true, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ctlz.nxv2i64(<vscale x 2 x i64> %va, i1 true, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -2456,9 +2368,7 @@ define <vscale x 4 x i64> @vp_ctlz_zero_undef_nxv4i64_unmasked(<vscale x 4 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ctlz.nxv4i64(<vscale x 4 x i64> %va, i1 true, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ctlz.nxv4i64(<vscale x 4 x i64> %va, i1 true, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2503,9 +2413,7 @@ define <vscale x 7 x i64> @vp_ctlz_zero_undef_nxv7i64_unmasked(<vscale x 7 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i64> @llvm.vp.ctlz.nxv7i64(<vscale x 7 x i64> %va, i1 true, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i64> @llvm.vp.ctlz.nxv7i64(<vscale x 7 x i64> %va, i1 true, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
 
@@ -2550,9 +2458,7 @@ define <vscale x 8 x i64> @vp_ctlz_zero_undef_nxv8i64_unmasked(<vscale x 8 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ctlz.nxv8i64(<vscale x 8 x i64> %va, i1 true, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ctlz.nxv8i64(<vscale x 8 x i64> %va, i1 true, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2658,9 +2564,7 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64_unmasked(<vscale x 16 x
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vclz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64> %va, i1 true, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index 2310f85b1fba93..883f68aec1f422 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -60,9 +60,7 @@ define <vscale x 1 x i8> @vp_ctpop_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 ze
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -118,9 +116,7 @@ define <vscale x 2 x i8> @vp_ctpop_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 ze
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -176,9 +172,7 @@ define <vscale x 4 x i8> @vp_ctpop_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 ze
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -234,9 +228,7 @@ define <vscale x 8 x i8> @vp_ctpop_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 ze
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -292,9 +284,7 @@ define <vscale x 16 x i8> @vp_ctpop_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -350,9 +340,7 @@ define <vscale x 32 x i8> @vp_ctpop_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ctpop.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ctpop.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -408,9 +396,7 @@ define <vscale x 64 x i8> @vp_ctpop_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ctpop.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ctpop.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -480,9 +466,7 @@ define <vscale x 1 x i16> @vp_ctpop_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -552,9 +536,7 @@ define <vscale x 2 x i16> @vp_ctpop_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -624,9 +606,7 @@ define <vscale x 4 x i16> @vp_ctpop_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -696,9 +676,7 @@ define <vscale x 8 x i16> @vp_ctpop_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -768,9 +746,7 @@ define <vscale x 16 x i16> @vp_ctpop_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -840,9 +816,7 @@ define <vscale x 32 x i16> @vp_ctpop_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ctpop.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ctpop.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -914,9 +888,7 @@ define <vscale x 1 x i32> @vp_ctpop_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -988,9 +960,7 @@ define <vscale x 2 x i32> @vp_ctpop_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1062,9 +1032,7 @@ define <vscale x 4 x i32> @vp_ctpop_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1136,9 +1104,7 @@ define <vscale x 8 x i32> @vp_ctpop_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1210,9 +1176,7 @@ define <vscale x 16 x i32> @vp_ctpop_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1378,9 +1342,7 @@ define <vscale x 1 x i64> @vp_ctpop_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1546,9 +1508,7 @@ define <vscale x 2 x i64> @vp_ctpop_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1714,9 +1674,7 @@ define <vscale x 4 x i64> @vp_ctpop_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1882,9 +1840,7 @@ define <vscale x 7 x i64> @vp_ctpop_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i64> @llvm.vp.ctpop.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i64> @llvm.vp.ctpop.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
 
@@ -2050,9 +2006,7 @@ define <vscale x 8 x i64> @vp_ctpop_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2581,9 +2535,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 145ce6e917f962..ef8a6c704a44b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -68,9 +68,7 @@ define <vscale x 1 x i8> @vp_cttz_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zer
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.cttz.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.cttz.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -134,9 +132,7 @@ define <vscale x 2 x i8> @vp_cttz_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.cttz.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.cttz.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -200,9 +196,7 @@ define <vscale x 4 x i8> @vp_cttz_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.cttz.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.cttz.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -266,9 +260,7 @@ define <vscale x 8 x i8> @vp_cttz_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zer
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.cttz.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.cttz.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -332,9 +324,7 @@ define <vscale x 16 x i8> @vp_cttz_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.cttz.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.cttz.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -398,9 +388,7 @@ define <vscale x 32 x i8> @vp_cttz_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.cttz.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.cttz.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -464,9 +452,7 @@ define <vscale x 64 x i8> @vp_cttz_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.cttz.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.cttz.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -544,9 +530,7 @@ define <vscale x 1 x i16> @vp_cttz_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.cttz.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.cttz.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -624,9 +608,7 @@ define <vscale x 2 x i16> @vp_cttz_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.cttz.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.cttz.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -704,9 +686,7 @@ define <vscale x 4 x i16> @vp_cttz_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.cttz.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.cttz.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -784,9 +764,7 @@ define <vscale x 8 x i16> @vp_cttz_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.cttz.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.cttz.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -864,9 +842,7 @@ define <vscale x 16 x i16> @vp_cttz_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.cttz.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.cttz.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -944,9 +920,7 @@ define <vscale x 32 x i16> @vp_cttz_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.cttz.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.cttz.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1026,9 +1000,7 @@ define <vscale x 1 x i32> @vp_cttz_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.cttz.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.cttz.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1108,9 +1080,7 @@ define <vscale x 2 x i32> @vp_cttz_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.cttz.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.cttz.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1190,9 +1160,7 @@ define <vscale x 4 x i32> @vp_cttz_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.cttz.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.cttz.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1272,9 +1240,7 @@ define <vscale x 8 x i32> @vp_cttz_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.cttz.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.cttz.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1354,9 +1320,7 @@ define <vscale x 16 x i32> @vp_cttz_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.cttz.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.cttz.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1538,9 +1502,7 @@ define <vscale x 1 x i64> @vp_cttz_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.cttz.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.cttz.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1722,9 +1684,7 @@ define <vscale x 2 x i64> @vp_cttz_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.cttz.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.cttz.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1906,9 +1866,7 @@ define <vscale x 4 x i64> @vp_cttz_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.cttz.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.cttz.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2090,9 +2048,7 @@ define <vscale x 7 x i64> @vp_cttz_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i64> @llvm.vp.cttz.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i64> @llvm.vp.cttz.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
 
@@ -2274,9 +2230,7 @@ define <vscale x 8 x i64> @vp_cttz_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.cttz.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.cttz.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2855,9 +2809,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }
 
@@ -2910,9 +2862,7 @@ define <vscale x 1 x i8> @vp_cttz_zero_undef_nxv1i8_unmasked(<vscale x 1 x i8> %
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.cttz.nxv1i8(<vscale x 1 x i8> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.cttz.nxv1i8(<vscale x 1 x i8> %va, i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -2966,9 +2916,7 @@ define <vscale x 2 x i8> @vp_cttz_zero_undef_nxv2i8_unmasked(<vscale x 2 x i8> %
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.cttz.nxv2i8(<vscale x 2 x i8> %va, i1 true, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.cttz.nxv2i8(<vscale x 2 x i8> %va, i1 true, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -3022,9 +2970,7 @@ define <vscale x 4 x i8> @vp_cttz_zero_undef_nxv4i8_unmasked(<vscale x 4 x i8> %
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.cttz.nxv4i8(<vscale x 4 x i8> %va, i1 true, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.cttz.nxv4i8(<vscale x 4 x i8> %va, i1 true, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -3078,9 +3024,7 @@ define <vscale x 8 x i8> @vp_cttz_zero_undef_nxv8i8_unmasked(<vscale x 8 x i8> %
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.cttz.nxv8i8(<vscale x 8 x i8> %va, i1 true, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.cttz.nxv8i8(<vscale x 8 x i8> %va, i1 true, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -3134,9 +3078,7 @@ define <vscale x 16 x i8> @vp_cttz_zero_undef_nxv16i8_unmasked(<vscale x 16 x i8
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.cttz.nxv16i8(<vscale x 16 x i8> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.cttz.nxv16i8(<vscale x 16 x i8> %va, i1 true, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -3199,9 +3141,7 @@ define <vscale x 32 x i8> @vp_cttz_zero_undef_nxv32i8_unmasked(<vscale x 32 x i8
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.cttz.nxv32i8(<vscale x 32 x i8> %va, i1 true, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.cttz.nxv32i8(<vscale x 32 x i8> %va, i1 true, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -3264,9 +3204,7 @@ define <vscale x 64 x i8> @vp_cttz_zero_undef_nxv64i8_unmasked(<vscale x 64 x i8
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.cttz.nxv64i8(<vscale x 64 x i8> %va, i1 true, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.cttz.nxv64i8(<vscale x 64 x i8> %va, i1 true, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -3312,9 +3250,7 @@ define <vscale x 1 x i16> @vp_cttz_zero_undef_nxv1i16_unmasked(<vscale x 1 x i16
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.cttz.nxv1i16(<vscale x 1 x i16> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.cttz.nxv1i16(<vscale x 1 x i16> %va, i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -3360,9 +3296,7 @@ define <vscale x 2 x i16> @vp_cttz_zero_undef_nxv2i16_unmasked(<vscale x 2 x i16
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.cttz.nxv2i16(<vscale x 2 x i16> %va, i1 true, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.cttz.nxv2i16(<vscale x 2 x i16> %va, i1 true, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -3408,9 +3342,7 @@ define <vscale x 4 x i16> @vp_cttz_zero_undef_nxv4i16_unmasked(<vscale x 4 x i16
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.cttz.nxv4i16(<vscale x 4 x i16> %va, i1 true, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.cttz.nxv4i16(<vscale x 4 x i16> %va, i1 true, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -3456,9 +3388,7 @@ define <vscale x 8 x i16> @vp_cttz_zero_undef_nxv8i16_unmasked(<vscale x 8 x i16
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.cttz.nxv8i16(<vscale x 8 x i16> %va, i1 true, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.cttz.nxv8i16(<vscale x 8 x i16> %va, i1 true, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -3504,9 +3434,7 @@ define <vscale x 16 x i16> @vp_cttz_zero_undef_nxv16i16_unmasked(<vscale x 16 x
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.cttz.nxv16i16(<vscale x 16 x i16> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.cttz.nxv16i16(<vscale x 16 x i16> %va, i1 true, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -3583,9 +3511,7 @@ define <vscale x 32 x i16> @vp_cttz_zero_undef_nxv32i16_unmasked(<vscale x 32 x
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.cttz.nxv32i16(<vscale x 32 x i16> %va, i1 true, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.cttz.nxv32i16(<vscale x 32 x i16> %va, i1 true, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -3633,9 +3559,7 @@ define <vscale x 1 x i32> @vp_cttz_zero_undef_nxv1i32_unmasked(<vscale x 1 x i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.cttz.nxv1i32(<vscale x 1 x i32> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.cttz.nxv1i32(<vscale x 1 x i32> %va, i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -3683,9 +3607,7 @@ define <vscale x 2 x i32> @vp_cttz_zero_undef_nxv2i32_unmasked(<vscale x 2 x i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.cttz.nxv2i32(<vscale x 2 x i32> %va, i1 true, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.cttz.nxv2i32(<vscale x 2 x i32> %va, i1 true, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -3733,9 +3655,7 @@ define <vscale x 4 x i32> @vp_cttz_zero_undef_nxv4i32_unmasked(<vscale x 4 x i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.cttz.nxv4i32(<vscale x 4 x i32> %va, i1 true, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.cttz.nxv4i32(<vscale x 4 x i32> %va, i1 true, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -3783,9 +3703,7 @@ define <vscale x 8 x i32> @vp_cttz_zero_undef_nxv8i32_unmasked(<vscale x 8 x i32
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.cttz.nxv8i32(<vscale x 8 x i32> %va, i1 true, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.cttz.nxv8i32(<vscale x 8 x i32> %va, i1 true, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -3832,9 +3750,7 @@ define <vscale x 16 x i32> @vp_cttz_zero_undef_nxv16i32_unmasked(<vscale x 16 x
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.cttz.nxv16i32(<vscale x 16 x i32> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.cttz.nxv16i32(<vscale x 16 x i32> %va, i1 true, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -3883,9 +3799,7 @@ define <vscale x 1 x i64> @vp_cttz_zero_undef_nxv1i64_unmasked(<vscale x 1 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.cttz.nxv1i64(<vscale x 1 x i64> %va, i1 true, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.cttz.nxv1i64(<vscale x 1 x i64> %va, i1 true, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -3934,9 +3848,7 @@ define <vscale x 2 x i64> @vp_cttz_zero_undef_nxv2i64_unmasked(<vscale x 2 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.cttz.nxv2i64(<vscale x 2 x i64> %va, i1 true, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.cttz.nxv2i64(<vscale x 2 x i64> %va, i1 true, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -3985,9 +3897,7 @@ define <vscale x 4 x i64> @vp_cttz_zero_undef_nxv4i64_unmasked(<vscale x 4 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.cttz.nxv4i64(<vscale x 4 x i64> %va, i1 true, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.cttz.nxv4i64(<vscale x 4 x i64> %va, i1 true, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -4036,9 +3946,7 @@ define <vscale x 7 x i64> @vp_cttz_zero_undef_nxv7i64_unmasked(<vscale x 7 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i64> @llvm.vp.cttz.nxv7i64(<vscale x 7 x i64> %va, i1 true, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i64> @llvm.vp.cttz.nxv7i64(<vscale x 7 x i64> %va, i1 true, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
 
@@ -4087,9 +3995,7 @@ define <vscale x 8 x i64> @vp_cttz_zero_undef_nxv8i64_unmasked(<vscale x 8 x i64
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.cttz.nxv8i64(<vscale x 8 x i64> %va, i1 true, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.cttz.nxv8i64(<vscale x 8 x i64> %va, i1 true, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -4227,9 +4133,7 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64_unmasked(<vscale x 16 x
 ; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-ZVBB-NEXT:    vctz.v v8, v8
 ; CHECK-ZVBB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, i1 true, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index f1ac9aa53f5795..3b7952f9f5e6dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -523,9 +523,7 @@ define float @extractelt_fadd_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    fmv.w.x fa4, a0
 ; CHECK-NEXT:    fadd.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = fadd <vscale x 4 x float> %x, %splat
+  %bo = fadd <vscale x 4 x float> %x, splat (float 3.0)
   %ext = extractelement <vscale x 4 x float> %bo, i32 2
   ret float %ext
 }
@@ -540,9 +538,7 @@ define float @extractelt_fsub_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    fmv.w.x fa4, a0
 ; CHECK-NEXT:    fsub.s fa0, fa4, fa5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = fsub <vscale x 4 x float> %splat, %x
+  %bo = fsub <vscale x 4 x float> splat (float 3.0), %x
   %ext = extractelement <vscale x 4 x float> %bo, i32 1
   ret float %ext
 }
@@ -557,9 +553,7 @@ define float @extractelt_fmul_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    fmv.w.x fa4, a0
 ; CHECK-NEXT:    fmul.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = fmul <vscale x 4 x float> %x, %splat
+  %bo = fmul <vscale x 4 x float> %x, splat (float 3.0)
   %ext = extractelement <vscale x 4 x float> %bo, i32 3
   ret float %ext
 }
@@ -573,9 +567,7 @@ define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-NEXT:    fmv.w.x fa4, a0
 ; CHECK-NEXT:    fdiv.s fa0, fa5, fa4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
-  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = fdiv <vscale x 4 x float> %x, %splat
+  %bo = fdiv <vscale x 4 x float> %x, splat (float 3.0)
   %ext = extractelement <vscale x 4 x float> %bo, i32 0
   ret float %ext
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index d9fdec3041cb06..df9949e617b807 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -753,9 +753,7 @@ define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    addi a0, a0, 3
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = add <vscale x 4 x i32> %x, %splat
+  %bo = add <vscale x 4 x i32> %x, splat (i32 3)
   %ext = extractelement <vscale x 4 x i32> %bo, i32 2
   ret i32 %ext
 }
@@ -769,9 +767,7 @@ define i32 @extractelt_sub_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; CHECK-NEXT:    li a1, 3
 ; CHECK-NEXT:    sub a0, a1, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = sub <vscale x 4 x i32> %splat, %x
+  %bo = sub <vscale x 4 x i32> splat (i32 3), %x
   %ext = extractelement <vscale x 4 x i32> %bo, i32 1
   ret i32 %ext
 }
@@ -795,9 +791,7 @@ define i32 @extractelt_mul_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; RV32M-NEXT:    slli a1, a0, 1
 ; RV32M-NEXT:    add a0, a1, a0
 ; RV32M-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = mul <vscale x 4 x i32> %x, %splat
+  %bo = mul <vscale x 4 x i32> %x, splat (i32 3)
   %ext = extractelement <vscale x 4 x i32> %bo, i32 3
   ret i32 %ext
 }
@@ -824,9 +818,7 @@ define i32 @extractelt_sdiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; RV32M-NEXT:    srli a1, a0, 31
 ; RV32M-NEXT:    add a0, a0, a1
 ; RV32M-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = sdiv <vscale x 4 x i32> %x, %splat
+  %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
   %ext = extractelement <vscale x 4 x i32> %bo, i32 0
   ret i32 %ext
 }
@@ -853,9 +845,7 @@ define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; RV32M-NEXT:    srli a1, a0, 31
 ; RV32M-NEXT:    add a0, a0, a1
 ; RV32M-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = sdiv <vscale x 4 x i32> %x, %splat
+  %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
   %ext = extractelement <vscale x 4 x i32> %bo, i32 0
   ret i32 %ext
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index fcee77ae8d8e95..a96cf5807e6c16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -737,9 +737,7 @@ define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    addiw a0, a0, 3
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = add <vscale x 4 x i32> %x, %splat
+  %bo = add <vscale x 4 x i32> %x, splat (i32 3)
   %ext = extractelement <vscale x 4 x i32> %bo, i32 2
   ret i32 %ext
 }
@@ -753,9 +751,7 @@ define i32 @extractelt_sub_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; CHECK-NEXT:    li a1, 3
 ; CHECK-NEXT:    subw a0, a1, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = sub <vscale x 4 x i32> %splat, %x
+  %bo = sub <vscale x 4 x i32> splat (i32 3), %x
   %ext = extractelement <vscale x 4 x i32> %bo, i32 1
   ret i32 %ext
 }
@@ -779,9 +775,7 @@ define i32 @extractelt_mul_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; RV64M-NEXT:    slli a1, a0, 1
 ; RV64M-NEXT:    addw a0, a1, a0
 ; RV64M-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = mul <vscale x 4 x i32> %x, %splat
+  %bo = mul <vscale x 4 x i32> %x, splat (i32 3)
   %ext = extractelement <vscale x 4 x i32> %bo, i32 3
   ret i32 %ext
 }
@@ -809,9 +803,7 @@ define i32 @extractelt_sdiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; RV64M-NEXT:    srli a0, a0, 32
 ; RV64M-NEXT:    addw a0, a0, a1
 ; RV64M-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = sdiv <vscale x 4 x i32> %x, %splat
+  %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
   %ext = extractelement <vscale x 4 x i32> %bo, i32 0
   ret i32 %ext
 }
@@ -839,9 +831,7 @@ define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
 ; RV64M-NEXT:    srli a0, a0, 32
 ; RV64M-NEXT:    addw a0, a0, a1
 ; RV64M-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %bo = sdiv <vscale x 4 x i32> %x, %splat
+  %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
   %ext = extractelement <vscale x 4 x i32> %bo, i32 0
   ret i32 %ext
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
index c273dcdfbca17a..c0d366760d0797 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
@@ -24,9 +24,7 @@ define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -50,9 +48,7 @@ define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -76,9 +72,7 @@ define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -102,9 +96,7 @@ define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -128,9 +120,7 @@ define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -154,9 +144,7 @@ define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -180,9 +168,7 @@ define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -206,9 +192,7 @@ define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -232,9 +216,7 @@ define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -258,9 +240,7 @@ define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -284,9 +264,7 @@ define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -310,9 +288,7 @@ define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -336,9 +312,7 @@ define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -362,9 +336,7 @@ define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -388,9 +360,7 @@ define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -414,9 +384,7 @@ define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
+  %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
 
@@ -440,9 +408,7 @@ define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -495,8 +461,6 @@ define <32 x i64> @vp_abs_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v24, v16, 0
 ; CHECK-NEXT:    vmax.vv v16, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 595c650ffb82a5..943fc58d637a00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -54,9 +54,7 @@ define <2 x i8> @vp_bitreverse_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.bitreverse.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.bitreverse.v2i8(<2 x i8> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -110,9 +108,7 @@ define <4 x i8> @vp_bitreverse_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.bitreverse.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.bitreverse.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -166,9 +162,7 @@ define <8 x i8> @vp_bitreverse_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.bitreverse.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.bitreverse.v8i8(<8 x i8> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -222,9 +216,7 @@ define <16 x i8> @vp_bitreverse_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.bitreverse.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.bitreverse.v16i8(<16 x i8> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -292,9 +284,7 @@ define <2 x i16> @vp_bitreverse_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.bitreverse.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.bitreverse.v2i16(<2 x i16> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -362,9 +352,7 @@ define <4 x i16> @vp_bitreverse_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.bitreverse.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.bitreverse.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -432,9 +420,7 @@ define <8 x i16> @vp_bitreverse_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.bitreverse.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.bitreverse.v8i16(<8 x i16> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -502,9 +488,7 @@ define <16 x i16> @vp_bitreverse_v16i16_unmasked(<16 x i16> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v10, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.bitreverse.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.bitreverse.v16i16(<16 x i16> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -588,9 +572,7 @@ define <2 x i32> @vp_bitreverse_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.bitreverse.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.bitreverse.v2i32(<2 x i32> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -674,9 +656,7 @@ define <4 x i32> @vp_bitreverse_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -760,9 +740,7 @@ define <8 x i32> @vp_bitreverse_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v10, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.bitreverse.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.bitreverse.v8i32(<8 x i32> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -846,9 +824,7 @@ define <16 x i32> @vp_bitreverse_v16i32_unmasked(<16 x i32> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vor.vv v8, v12, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.bitreverse.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.bitreverse.v16i32(<16 x i32> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1120,9 +1096,7 @@ define <2 x i64> @vp_bitreverse_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl)
 ; RV64-NEXT:    vadd.vv v8, v8, v8
 ; RV64-NEXT:    vor.vv v8, v9, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.bitreverse.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.bitreverse.v2i64(<2 x i64> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1394,9 +1368,7 @@ define <4 x i64> @vp_bitreverse_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl)
 ; RV64-NEXT:    vadd.vv v8, v8, v8
 ; RV64-NEXT:    vor.vv v8, v10, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.bitreverse.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.bitreverse.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1668,9 +1640,7 @@ define <8 x i64> @vp_bitreverse_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl)
 ; RV64-NEXT:    vadd.vv v8, v8, v8
 ; RV64-NEXT:    vor.vv v8, v12, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.bitreverse.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.bitreverse.v8i64(<8 x i64> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -2034,9 +2004,7 @@ define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %ev
 ; RV64-NEXT:    vadd.vv v8, v8, v8
 ; RV64-NEXT:    vor.vv v8, v16, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x i64> @llvm.vp.bitreverse.v15i64(<15 x i64> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
 
@@ -2400,9 +2368,7 @@ define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %ev
 ; RV64-NEXT:    vadd.vv v8, v8, v8
 ; RV64-NEXT:    vor.vv v8, v16, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.bitreverse.v16i64(<16 x i64> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -2555,8 +2521,6 @@ define <128 x i16> @vp_bitreverse_v128i16_unmasked(<128 x i16> %va, i32 zeroext
 ; CHECK-NEXT:    vadd.vv v16, v16, v16
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <128 x i1> %head, <128 x i1> poison, <128 x i32> zeroinitializer
-  %v = call <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl)
+  %v = call <128 x i16> @llvm.vp.bitreverse.v128i16(<128 x i16> %va, <128 x i1> splat (i1 true), i32 %evl)
   ret <128 x i16> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
index 6308f73e219da1..f80d4e5c0d7c3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
@@ -26,9 +26,7 @@ define <2 x i16> @vp_bswap_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsll.vi v8, v8, 8
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.bswap.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.bswap.v2i16(<2 x i16> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -54,9 +52,7 @@ define <4 x i16> @vp_bswap_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsll.vi v8, v8, 8
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.bswap.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.bswap.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -82,9 +78,7 @@ define <8 x i16> @vp_bswap_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsll.vi v8, v8, 8
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.bswap.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.bswap.v8i16(<8 x i16> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -110,9 +104,7 @@ define <16 x i16> @vp_bswap_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsll.vi v8, v8, 8
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.bswap.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.bswap.v16i16(<16 x i16> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -154,9 +146,7 @@ define <2 x i32> @vp_bswap_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.bswap.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.bswap.v2i32(<2 x i32> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -198,9 +188,7 @@ define <4 x i32> @vp_bswap_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.bswap.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.bswap.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -242,9 +230,7 @@ define <8 x i32> @vp_bswap_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.bswap.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.bswap.v8i32(<8 x i32> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -286,9 +272,7 @@ define <16 x i32> @vp_bswap_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.bswap.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.bswap.v16i32(<16 x i32> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -446,9 +430,7 @@ define <2 x i64> @vp_bswap_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vor.vv v8, v8, v10
 ; RV64-NEXT:    vor.vv v8, v9, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.bswap.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.bswap.v2i64(<2 x i64> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -606,9 +588,7 @@ define <4 x i64> @vp_bswap_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vor.vv v8, v8, v12
 ; RV64-NEXT:    vor.vv v8, v10, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.bswap.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.bswap.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -766,9 +746,7 @@ define <8 x i64> @vp_bswap_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vor.vv v8, v12, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.bswap.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.bswap.v8i64(<8 x i64> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -999,9 +977,7 @@ define <15 x i64> @vp_bswap_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vor.vv v8, v8, v24
 ; RV64-NEXT:    vor.vv v8, v16, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x i64> @llvm.vp.bswap.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x i64> @llvm.vp.bswap.v15i64(<15 x i64> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
 
@@ -1232,9 +1208,7 @@ define <16 x i64> @vp_bswap_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vor.vv v8, v8, v24
 ; RV64-NEXT:    vor.vv v8, v16, v8
 ; RV64-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.bswap.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.bswap.v16i64(<16 x i64> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1315,8 +1289,6 @@ define <128 x i16> @vp_bswap_v128i16_unmasked(<128 x i16> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsll.vi v16, v16, 8
 ; CHECK-NEXT:    vor.vv v16, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <128 x i1> %head, <128 x i1> poison, <128 x i32> zeroinitializer
-  %v = call <128 x i16> @llvm.vp.bswap.v128i16(<128 x i16> %va, <128 x i1> %m, i32 %evl)
+  %v = call <128 x i16> @llvm.vp.bswap.v128i16(<128 x i16> %va, <128 x i1> splat (i1 true), i32 %evl)
   ret <128 x i16> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 194179f9f470e1..5d024f140fd5d6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -86,9 +86,7 @@ define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.ceil.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.ceil.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.ceil.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.ceil.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.ceil.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.ceil.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.ceil.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.ceil.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -388,9 +380,7 @@ define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.ceil.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.ceil.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -432,9 +422,7 @@ define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.ceil.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.ceil.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -478,9 +466,7 @@ define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.ceil.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.ceil.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -524,9 +510,7 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.ceil.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.ceil.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -568,9 +552,7 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -614,9 +596,7 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -660,9 +640,7 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -706,9 +684,7 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -752,9 +728,7 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -867,8 +841,6 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
index 36f22bd3259cf9..2f4539d5038c2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
@@ -58,9 +58,7 @@ define <2 x i8> @vp_ctlz_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -118,9 +116,7 @@ define <4 x i8> @vp_ctlz_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -178,9 +174,7 @@ define <8 x i8> @vp_ctlz_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -238,9 +232,7 @@ define <16 x i8> @vp_ctlz_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -316,9 +308,7 @@ define <2 x i16> @vp_ctlz_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -394,9 +384,7 @@ define <4 x i16> @vp_ctlz_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -472,9 +460,7 @@ define <8 x i16> @vp_ctlz_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -550,9 +536,7 @@ define <16 x i16> @vp_ctlz_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -634,9 +618,7 @@ define <2 x i32> @vp_ctlz_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -718,9 +700,7 @@ define <4 x i32> @vp_ctlz_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -802,9 +782,7 @@ define <8 x i32> @vp_ctlz_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -886,9 +864,7 @@ define <16 x i32> @vp_ctlz_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1098,9 +1074,7 @@ define <2 x i64> @vp_ctlz_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1310,9 +1284,7 @@ define <4 x i64> @vp_ctlz_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1522,9 +1494,7 @@ define <8 x i64> @vp_ctlz_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1764,9 +1734,7 @@ define <15 x i64> @vp_ctlz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
+  %v = call <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
 
@@ -2006,9 +1974,7 @@ define <16 x i64> @vp_ctlz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -2623,9 +2589,7 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vmul.vx v16, v16, a5
 ; RV64-NEXT:    vsrl.vx v16, v16, a6
 ; RV64-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -2681,9 +2645,7 @@ define <2 x i8> @vp_ctlz_zero_undef_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> %va, i1 true, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> %va, i1 true, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -2739,9 +2701,7 @@ define <4 x i8> @vp_ctlz_zero_undef_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> %va, i1 true, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> %va, i1 true, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -2797,9 +2757,7 @@ define <8 x i8> @vp_ctlz_zero_undef_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> %va, i1 true, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> %va, i1 true, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -2855,9 +2813,7 @@ define <16 x i8> @vp_ctlz_zero_undef_v16i8_unmasked(<16 x i8> %va, i32 zeroext %
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> %va, i1 true, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> %va, i1 true, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -2931,9 +2887,7 @@ define <2 x i16> @vp_ctlz_zero_undef_v2i16_unmasked(<2 x i16> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> %va, i1 true, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> %va, i1 true, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -3007,9 +2961,7 @@ define <4 x i16> @vp_ctlz_zero_undef_v4i16_unmasked(<4 x i16> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> %va, i1 true, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> %va, i1 true, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -3083,9 +3035,7 @@ define <8 x i16> @vp_ctlz_zero_undef_v8i16_unmasked(<8 x i16> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> %va, i1 true, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> %va, i1 true, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -3159,9 +3109,7 @@ define <16 x i16> @vp_ctlz_zero_undef_v16i16_unmasked(<16 x i16> %va, i32 zeroex
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> %va, i1 true, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> %va, i1 true, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -3241,9 +3189,7 @@ define <2 x i32> @vp_ctlz_zero_undef_v2i32_unmasked(<2 x i32> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> %va, i1 true, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> %va, i1 true, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -3323,9 +3269,7 @@ define <4 x i32> @vp_ctlz_zero_undef_v4i32_unmasked(<4 x i32> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %va, i1 true, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %va, i1 true, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -3405,9 +3349,7 @@ define <8 x i32> @vp_ctlz_zero_undef_v8i32_unmasked(<8 x i32> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> %va, i1 true, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> %va, i1 true, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -3487,9 +3429,7 @@ define <16 x i32> @vp_ctlz_zero_undef_v16i32_unmasked(<16 x i32> %va, i32 zeroex
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> %va, i1 true, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> %va, i1 true, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -3697,9 +3637,7 @@ define <2 x i64> @vp_ctlz_zero_undef_v2i64_unmasked(<2 x i64> %va, i32 zeroext %
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> %va, i1 true, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> %va, i1 true, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -3907,9 +3845,7 @@ define <4 x i64> @vp_ctlz_zero_undef_v4i64_unmasked(<4 x i64> %va, i32 zeroext %
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> %va, i1 true, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> %va, i1 true, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -4117,9 +4053,7 @@ define <8 x i64> @vp_ctlz_zero_undef_v8i64_unmasked(<8 x i64> %va, i32 zeroext %
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> %va, i1 true, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> %va, i1 true, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -4357,9 +4291,7 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64> %va, i1 true, <15 x i1> %m, i32 %evl)
+  %v = call <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64> %va, i1 true, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
 
@@ -4597,9 +4529,7 @@ define <16 x i64> @vp_ctlz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroex
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> %va, i1 true, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> %va, i1 true, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -5212,8 +5142,6 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
 ; RV64-NEXT:    vmul.vx v16, v16, a5
 ; RV64-NEXT:    vsrl.vx v16, v16, a6
 ; RV64-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 true, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, i1 true, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index c4b22955f84c4f..0b6d8b33394d50 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -44,9 +44,7 @@ define <2 x i8> @vp_ctpop_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -90,9 +88,7 @@ define <4 x i8> @vp_ctpop_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -136,9 +132,7 @@ define <8 x i8> @vp_ctpop_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -182,9 +176,7 @@ define <16 x i8> @vp_ctpop_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -242,9 +234,7 @@ define <2 x i16> @vp_ctpop_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -302,9 +292,7 @@ define <4 x i16> @vp_ctpop_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -362,9 +350,7 @@ define <8 x i16> @vp_ctpop_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -422,9 +408,7 @@ define <16 x i16> @vp_ctpop_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -484,9 +468,7 @@ define <2 x i32> @vp_ctpop_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -546,9 +528,7 @@ define <4 x i32> @vp_ctpop_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -608,9 +588,7 @@ define <8 x i32> @vp_ctpop_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -670,9 +648,7 @@ define <16 x i32> @vp_ctpop_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -826,9 +802,7 @@ define <2 x i64> @vp_ctpop_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -982,9 +956,7 @@ define <4 x i64> @vp_ctpop_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1138,9 +1110,7 @@ define <8 x i64> @vp_ctpop_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1324,9 +1294,7 @@ define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
 
@@ -1510,9 +1478,7 @@ define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1962,8 +1928,6 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vmul.vx v16, v16, a4
 ; RV64-NEXT:    vsrl.vx v16, v16, a5
 ; RV64-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
index 49f6ffd691292a..f2926fa91e5c26 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
@@ -52,9 +52,7 @@ define <2 x i8> @vp_cttz_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -106,9 +104,7 @@ define <4 x i8> @vp_cttz_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -160,9 +156,7 @@ define <8 x i8> @vp_cttz_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -214,9 +208,7 @@ define <16 x i8> @vp_cttz_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -282,9 +274,7 @@ define <2 x i16> @vp_cttz_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -350,9 +340,7 @@ define <4 x i16> @vp_cttz_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -418,9 +406,7 @@ define <8 x i16> @vp_cttz_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -486,9 +472,7 @@ define <16 x i16> @vp_cttz_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -556,9 +540,7 @@ define <2 x i32> @vp_cttz_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -626,9 +608,7 @@ define <4 x i32> @vp_cttz_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -696,9 +676,7 @@ define <8 x i32> @vp_cttz_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -766,9 +744,7 @@ define <16 x i32> @vp_cttz_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -938,9 +914,7 @@ define <2 x i64> @vp_cttz_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1110,9 +1084,7 @@ define <4 x i64> @vp_cttz_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1282,9 +1254,7 @@ define <8 x i64> @vp_cttz_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1484,9 +1454,7 @@ define <15 x i64> @vp_cttz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
+  %v = call <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
 
@@ -1686,9 +1654,7 @@ define <16 x i64> @vp_cttz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -2223,9 +2189,7 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vmul.vx v16, v16, a5
 ; RV64-NEXT:    vsrl.vx v16, v16, a6
 ; RV64-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -2275,9 +2239,7 @@ define <2 x i8> @vp_cttz_zero_undef_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> %va, i1 true, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> %va, i1 true, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -2327,9 +2289,7 @@ define <4 x i8> @vp_cttz_zero_undef_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> %va, i1 true, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> %va, i1 true, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -2379,9 +2339,7 @@ define <8 x i8> @vp_cttz_zero_undef_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> %va, i1 true, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> %va, i1 true, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -2431,9 +2389,7 @@ define <16 x i8> @vp_cttz_zero_undef_v16i8_unmasked(<16 x i8> %va, i32 zeroext %
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> %va, i1 true, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> %va, i1 true, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -2497,9 +2453,7 @@ define <2 x i16> @vp_cttz_zero_undef_v2i16_unmasked(<2 x i16> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> %va, i1 true, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> %va, i1 true, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -2563,9 +2517,7 @@ define <4 x i16> @vp_cttz_zero_undef_v4i16_unmasked(<4 x i16> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> %va, i1 true, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> %va, i1 true, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -2629,9 +2581,7 @@ define <8 x i16> @vp_cttz_zero_undef_v8i16_unmasked(<8 x i16> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> %va, i1 true, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> %va, i1 true, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -2695,9 +2645,7 @@ define <16 x i16> @vp_cttz_zero_undef_v16i16_unmasked(<16 x i16> %va, i32 zeroex
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> %va, i1 true, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> %va, i1 true, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -2763,9 +2711,7 @@ define <2 x i32> @vp_cttz_zero_undef_v2i32_unmasked(<2 x i32> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> %va, i1 true, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> %va, i1 true, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -2831,9 +2777,7 @@ define <4 x i32> @vp_cttz_zero_undef_v4i32_unmasked(<4 x i32> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %va, i1 true, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %va, i1 true, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -2899,9 +2843,7 @@ define <8 x i32> @vp_cttz_zero_undef_v8i32_unmasked(<8 x i32> %va, i32 zeroext %
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> %va, i1 true, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> %va, i1 true, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -2967,9 +2909,7 @@ define <16 x i32> @vp_cttz_zero_undef_v16i32_unmasked(<16 x i32> %va, i32 zeroex
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> %va, i1 true, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> %va, i1 true, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -3137,9 +3077,7 @@ define <2 x i64> @vp_cttz_zero_undef_v2i64_unmasked(<2 x i64> %va, i32 zeroext %
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> %va, i1 true, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> %va, i1 true, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -3307,9 +3245,7 @@ define <4 x i64> @vp_cttz_zero_undef_v4i64_unmasked(<4 x i64> %va, i32 zeroext %
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> %va, i1 true, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> %va, i1 true, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -3477,9 +3413,7 @@ define <8 x i64> @vp_cttz_zero_undef_v8i64_unmasked(<8 x i64> %va, i32 zeroext %
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> %va, i1 true, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> %va, i1 true, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -3677,9 +3611,7 @@ define <15 x i64> @vp_cttz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64> %va, i1 true, <15 x i1> %m, i32 %evl)
+  %v = call <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64> %va, i1 true, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
 
@@ -3877,9 +3809,7 @@ define <16 x i64> @vp_cttz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroex
 ; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> %va, i1 true, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> %va, i1 true, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -4412,8 +4342,6 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
 ; RV64-NEXT:    vmul.vx v16, v16, a5
 ; RV64-NEXT:    vsrl.vx v16, v16, a6
 ; RV64-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 true, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, i1 true, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 583742224f8cf1..6c2be509f7c22e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -86,9 +86,7 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.floor.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.floor.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.floor.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.floor.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.floor.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.floor.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.floor.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.floor.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -388,9 +380,7 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.floor.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.floor.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -432,9 +422,7 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.floor.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.floor.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -478,9 +466,7 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.floor.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.floor.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -524,9 +510,7 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.floor.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.floor.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -568,9 +552,7 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -614,9 +596,7 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -660,9 +640,7 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -706,9 +684,7 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -752,9 +728,7 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -867,8 +841,6 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index 3b7480117d375a..edb33158e32eba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -76,9 +76,7 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.maximum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.maximum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -150,9 +148,7 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.maximum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.maximum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -226,9 +222,7 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.maximum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.maximum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -304,9 +298,7 @@ define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.maximum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.maximum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -340,9 +332,7 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.maximum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.maximum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -376,9 +366,7 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.maximum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.maximum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -414,9 +402,7 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.maximum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.maximum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -452,9 +438,7 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.maximum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.maximum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -488,9 +472,7 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.maximum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.maximum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -526,9 +508,7 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb,
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.maximum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.maximum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -564,9 +544,7 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb,
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.maximum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.maximum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -615,9 +593,7 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.maximum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.maximum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -808,8 +784,6 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.maximum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.maximum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index 57275df57a3113..48649c43f782ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -76,9 +76,7 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.minimum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.minimum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -150,9 +148,7 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.minimum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.minimum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -226,9 +222,7 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.minimum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.minimum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -304,9 +298,7 @@ define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.minimum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.minimum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -340,9 +332,7 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.minimum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.minimum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -376,9 +366,7 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.minimum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.minimum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -414,9 +402,7 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.minimum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.minimum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -452,9 +438,7 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.minimum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.minimum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -488,9 +472,7 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.minimum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.minimum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -526,9 +508,7 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb,
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.minimum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.minimum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -564,9 +544,7 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb,
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.minimum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.minimum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -615,9 +593,7 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.minimum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.minimum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -808,8 +784,6 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
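
The unmasked tests above all build an all-true <N x i1> mask, and the two spellings removed and added in these hunks denote the same value: the shorthand folds the insertelement/shufflevector pair into a single constant. A minimal standalone sketch of the equivalence (the function names here are illustrative only, not part of the patch's tests):

; Long form: materialize the splat with an insert followed by a zero-index shuffle.
define <4 x i1> @allones_mask_long() {
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  ret <4 x i1> %m
}

; Shorthand: the same all-true mask written as a single constant.
define <4 x i1> @allones_mask_short() {
  ret <4 x i1> splat (i1 true)
}
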
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
index dc907eed16cce6..97842c294036d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
@@ -87,9 +87,7 @@ define void @splat_zero_v8f16(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x half> poison, half 0.0, i32 0
-  %b = shufflevector <8 x half> %a, <8 x half> poison, <8 x i32> zeroinitializer
-  store <8 x half> %b, ptr %x
+  store <8 x half> splat (half 0.0), ptr %x
   ret void
 }
 
@@ -100,9 +98,7 @@ define void @splat_zero_v4f32(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x float> poison, float 0.0, i32 0
-  %b = shufflevector <4 x float> %a, <4 x float> poison, <4 x i32> zeroinitializer
-  store <4 x float> %b, ptr %x
+  store <4 x float> splat (float 0.0), ptr %x
   ret void
 }
 
@@ -113,9 +109,7 @@ define void @splat_zero_v2f64(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x double> poison, double 0.0, i32 0
-  %b = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> zeroinitializer
-  store <2 x double> %b, ptr %x
+  store <2 x double> splat (double 0.0), ptr %x
   ret void
 }
 
@@ -126,9 +120,7 @@ define void @splat_zero_16f16(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x half> poison, half 0.0, i32 0
-  %b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
-  store <16 x half> %b, ptr %x
+  store <16 x half> splat (half 0.0), ptr %x
   ret void
 }
 
@@ -139,9 +131,7 @@ define void @splat_zero_v8f32(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x float> poison, float 0.0, i32 0
-  %b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
-  store <8 x float> %b, ptr %x
+  store <8 x float> splat (float 0.0), ptr %x
   ret void
 }
 
@@ -152,9 +142,7 @@ define void @splat_zero_v4f64(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x double> poison, double 0.0, i32 0
-  %b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
-  store <4 x double> %b, ptr %x
+  store <4 x double> splat (double 0.0), ptr %x
   ret void
 }
 
@@ -166,9 +154,7 @@ define void @splat_negzero_v8f16(ptr %x) {
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x half> poison, half -0.0, i32 0
-  %b = shufflevector <8 x half> %a, <8 x half> poison, <8 x i32> zeroinitializer
-  store <8 x half> %b, ptr %x
+  store <8 x half> splat (half -0.0), ptr %x
   ret void
 }
 
@@ -180,9 +166,7 @@ define void @splat_negzero_v4f32(ptr %x) {
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x float> poison, float -0.0, i32 0
-  %b = shufflevector <4 x float> %a, <4 x float> poison, <4 x i32> zeroinitializer
-  store <4 x float> %b, ptr %x
+  store <4 x float> splat (float -0.0), ptr %x
   ret void
 }
 
@@ -204,9 +188,7 @@ define void @splat_negzero_v2f64(ptr %x) {
 ; CHECK-RV64-NEXT:    vmv.v.x v8, a1
 ; CHECK-RV64-NEXT:    vse64.v v8, (a0)
 ; CHECK-RV64-NEXT:    ret
-  %a = insertelement <2 x double> poison, double -0.0, i32 0
-  %b = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> zeroinitializer
-  store <2 x double> %b, ptr %x
+  store <2 x double> splat (double -0.0), ptr %x
   ret void
 }
 
@@ -218,9 +200,7 @@ define void @splat_negzero_16f16(ptr %x) {
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x half> poison, half -0.0, i32 0
-  %b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
-  store <16 x half> %b, ptr %x
+  store <16 x half> splat (half -0.0), ptr %x
   ret void
 }
 
@@ -232,9 +212,7 @@ define void @splat_negzero_v8f32(ptr %x) {
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x float> poison, float -0.0, i32 0
-  %b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
-  store <8 x float> %b, ptr %x
+  store <8 x float> splat (float -0.0), ptr %x
   ret void
 }
 
@@ -256,8 +234,6 @@ define void @splat_negzero_v4f64(ptr %x) {
 ; CHECK-RV64-NEXT:    vmv.v.x v8, a1
 ; CHECK-RV64-NEXT:    vse64.v v8, (a0)
 ; CHECK-RV64-NEXT:    ret
-  %a = insertelement <4 x double> poison, double -0.0, i32 0
-  %b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
-  store <4 x double> %b, ptr %x
+  store <4 x double> splat (double -0.0), ptr %x
   ret void
 }
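
The fp-splat hunks above store the splat constant directly. As the check lines show, a +0.0 splat can be emitted with vmv.v.i, while a -0.0 splat (sign bit set) goes through a scalar register via vmv.v.x. A small sketch of both stores, with an illustrative function name:

define void @store_fp_splats(ptr %p, ptr %q) {
  store <4 x float> splat (float 0.0), ptr %p   ; all-zero bit pattern
  store <4 x float> splat (float -0.0), ptr %q  ; only the sign bit differs
  ret void
}
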
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
index 5de28a0d722d0e..51ac27acaf470d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
@@ -24,7 +24,7 @@ define <2 x float> @vfpext_v2f16_v2f32_unmasked(<2 x half> %a, i32 zeroext %vl)
 ; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x float> @llvm.vp.fpext.v2f32.v2f16(<2 x half> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+  %v = call <2 x float> @llvm.vp.fpext.v2f32.v2f16(<2 x half> %a, <2 x i1> splat (i1 true), i32 %vl)
   ret <2 x float> %v
 }
 
@@ -50,7 +50,7 @@ define <2 x double> @vfpext_v2f16_v2f64_unmasked(<2 x half> %a, i32 zeroext %vl)
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+  %v = call <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half> %a, <2 x i1> splat (i1 true), i32 %vl)
   ret <2 x double> %v
 }
 
@@ -74,7 +74,7 @@ define <2 x double> @vfpext_v2f32_v2f64_unmasked(<2 x float> %a, i32 zeroext %vl
 ; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x double> @llvm.vp.fpext.v2f64.v2f32(<2 x float> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+  %v = call <2 x double> @llvm.vp.fpext.v2f64.v2f32(<2 x float> %a, <2 x i1> splat (i1 true), i32 %vl)
   ret <2 x double> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll
index dab4b4d9926e1d..602662b184290a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll
@@ -42,7 +42,7 @@ define <4 x i1> @vfptosi_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
 ; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i1> %v
 }
 
@@ -66,7 +66,7 @@ define <4 x i1> @vfptosi_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i1> %v
 }
 
@@ -91,6 +91,6 @@ define <4 x i1> @vfptosi_v4i1_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i1> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
index c673e396914bf3..49a1b19b58a270 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
@@ -67,7 +67,7 @@ define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -105,7 +105,7 @@ define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -145,7 +145,7 @@ define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -187,7 +187,7 @@ define <4 x i64> @vfptosi_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.rtz.x.f.v v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -213,7 +213,7 @@ define <4 x i8> @vfptosi_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -237,7 +237,7 @@ define <4 x i16> @vfptosi_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -259,7 +259,7 @@ define <4 x i32> @vfptosi_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -283,7 +283,7 @@ define <4 x i64> @vfptosi_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -313,7 +313,7 @@ define <4 x i8> @vfptosi_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -339,7 +339,7 @@ define <4 x i16> @vfptosi_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -363,7 +363,7 @@ define <4 x i32> @vfptosi_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -385,7 +385,7 @@ define <4 x i64> @vfptosi_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -434,6 +434,6 @@ define <32 x i64> @vfptosi_v32i64_v32f64_unmasked(<32 x double> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
 ; CHECK-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  %v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
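
In the VP tests above, an "unmasked" call is simply a masked call whose mask operand is the all-true splat, with the explicit %evl still controlling the active length. A self-contained sketch using one of the intrinsics exercised above (the function name is illustrative):

declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float>, <4 x i1>, i32)

define <4 x i32> @fptosi_unmasked(<4 x float> %x, i32 zeroext %evl) {
  %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %x, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}
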
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll
index f1a78b25e18620..c5bfd41ec9510d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll
@@ -42,7 +42,7 @@ define <4 x i1> @vfptoui_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
 ; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i1> %v
 }
 
@@ -66,7 +66,7 @@ define <4 x i1> @vfptoui_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i1> %v
 }
 
@@ -91,6 +91,6 @@ define <4 x i1> @vfptoui_v4i1_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i1> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
index 0a19dcb550b58e..d44efa2f6133f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
@@ -67,7 +67,7 @@ define <4 x i8> @vfptoui_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -105,7 +105,7 @@ define <4 x i16> @vfptoui_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -145,7 +145,7 @@ define <4 x i32> @vfptoui_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -187,7 +187,7 @@ define <4 x i64> @vfptoui_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -213,7 +213,7 @@ define <4 x i8> @vfptoui_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -237,7 +237,7 @@ define <4 x i16> @vfptoui_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -259,7 +259,7 @@ define <4 x i32> @vfptoui_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -283,7 +283,7 @@ define <4 x i64> @vfptoui_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -313,7 +313,7 @@ define <4 x i8> @vfptoui_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -339,7 +339,7 @@ define <4 x i16> @vfptoui_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -363,7 +363,7 @@ define <4 x i32> @vfptoui_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -385,7 +385,7 @@ define <4 x i64> @vfptoui_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -434,6 +434,6 @@ define <32 x i64> @vfptoui_v32i64_v32f64_unmasked(<32 x double> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16
 ; CHECK-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  %v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
index 0d5b59b087b40f..de11f9e8a9fa2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
@@ -25,7 +25,7 @@ define <2 x half> @vfptrunc_v2f16_v2f32_unmasked(<2 x float> %a, i32 zeroext %vl
 ; CHECK-NEXT:    vfncvt.f.f.w v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f32(<2 x float> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+  %v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f32(<2 x float> %a, <2 x i1> splat (i1 true), i32 %vl)
   ret <2 x half> %v
 }
 
@@ -51,7 +51,7 @@ define <2 x half> @vfptrunc_v2f16_v2f64_unmasked(<2 x double> %a, i32 zeroext %v
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+  %v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double> %a, <2 x i1> splat (i1 true), i32 %vl)
   ret <2 x half> %v
 }
 
@@ -75,7 +75,7 @@ define <2 x float> @vfptrunc_v2f32_v2f64_unmasked(<2 x double> %a, i32 zeroext %
 ; CHECK-NEXT:    vfncvt.f.f.w v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x float> @llvm.vp.fptrunc.v2f64.v2f32(<2 x double> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+  %v = call <2 x float> @llvm.vp.fptrunc.v2f64.v2f32(<2 x double> %a, <2 x i1> splat (i1 true), i32 %vl)
   ret <2 x float> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
index 4da778a3547260..0b08d9401402b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
@@ -523,9 +523,7 @@ define void @seteq_vi_v16i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 0, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = icmp eq <16 x i8> %a, %c
+  %d = icmp eq <16 x i8> %a, splat (i8 0)
   store <16 x i1> %d, ptr %z
   ret void
 }
@@ -540,9 +538,7 @@ define void @setne_vi_v32i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, ptr %x
-  %b = insertelement <32 x i8> poison, i8 0, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
-  %d = icmp ne <32 x i8> %a, %c
+  %d = icmp ne <32 x i8> %a, splat (i8 0)
   store <32 x i1> %d, ptr %z
   ret void
 }
@@ -557,9 +553,7 @@ define void @setgt_vi_v64i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, ptr %x
-  %b = insertelement <64 x i8> poison, i8 0, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
-  %d = icmp sgt <64 x i8> %a, %c
+  %d = icmp sgt <64 x i8> %a, splat (i8 0)
   store <64 x i1> %d, ptr %z
   ret void
 }
@@ -574,9 +568,7 @@ define void @setgt_vi_v64i8_nonzero(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, ptr %x
-  %b = insertelement <64 x i8> poison, i8 5, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
-  %d = icmp sgt <64 x i8> %a, %c
+  %d = icmp sgt <64 x i8> %a, splat (i8 5)
   store <64 x i1> %d, ptr %z
   ret void
 }
@@ -591,9 +583,7 @@ define void @setlt_vi_v128i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, ptr %x
-  %b = insertelement <128 x i8> poison, i8 0, i32 0
-  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
-  %d = icmp slt <128 x i8> %a, %c
+  %d = icmp slt <128 x i8> %a, splat (i8 0)
   store <128 x i1> %d, ptr %z
   ret void
 }
@@ -607,9 +597,7 @@ define void @setge_vi_v8i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, ptr %x
-  %b = insertelement <8 x i8> poison, i8 0, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
-  %d = icmp sge <8 x i8> %a, %c
+  %d = icmp sge <8 x i8> %a, splat (i8 0)
   store <8 x i1> %d, ptr %z
   ret void
 }
@@ -623,9 +611,7 @@ define void @setle_vi_v16i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 0, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = icmp sle <16 x i8> %a, %c
+  %d = icmp sle <16 x i8> %a, splat (i8 0)
   store <16 x i1> %d, ptr %z
   ret void
 }
@@ -640,9 +626,7 @@ define void @setugt_vi_v32i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x i8>, ptr %x
-  %b = insertelement <32 x i8> poison, i8 5, i32 0
-  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
-  %d = icmp ugt <32 x i8> %a, %c
+  %d = icmp ugt <32 x i8> %a, splat (i8 5)
   store <32 x i1> %d, ptr %z
   ret void
 }
@@ -657,9 +641,7 @@ define void @setult_vi_v64i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v12, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x i8>, ptr %x
-  %b = insertelement <64 x i8> poison, i8 5, i32 0
-  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
-  %d = icmp ult <64 x i8> %a, %c
+  %d = icmp ult <64 x i8> %a, splat (i8 5)
   store <64 x i1> %d, ptr %z
   ret void
 }
@@ -674,9 +656,7 @@ define void @setuge_vi_v128i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v16, (a1)
 ; CHECK-NEXT:    ret
   %a = load <128 x i8>, ptr %x
-  %b = insertelement <128 x i8> poison, i8 5, i32 0
-  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
-  %d = icmp uge <128 x i8> %a, %c
+  %d = icmp uge <128 x i8> %a, splat (i8 5)
   store <128 x i1> %d, ptr %z
   ret void
 }
@@ -690,9 +670,7 @@ define void @setule_vi_v8i8(ptr %x, ptr %z) {
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, ptr %x
-  %b = insertelement <8 x i8> poison, i8 5, i32 0
-  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
-  %d = icmp ule <8 x i8> %a, %c
+  %d = icmp ule <8 x i8> %a, splat (i8 5)
   store <8 x i1> %d, ptr %z
   ret void
 }
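
The setcc hunks above now write the splatted compare operand inline in the icmp rather than building it with a separate insertelement/shufflevector pair first. A short sketch (illustrative function name):

define <8 x i1> @setugt_splat_five(<8 x i8> %a) {
  %c = icmp ugt <8 x i8> %a, splat (i8 5)
  ret <8 x i1> %c
}
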
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index 60202cfba760d8..649aa067b01aff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -140,9 +140,7 @@ define void @splat_zero_v16i8(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x i8> poison, i8 0, i32 0
-  %b = shufflevector <16 x i8> %a, <16 x i8> poison, <16 x i32> zeroinitializer
-  store <16 x i8> %b, ptr %x
+  store <16 x i8> splat (i8 0), ptr %x
   ret void
 }
 
@@ -153,9 +151,7 @@ define void @splat_zero_v8i16(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i16> poison, i16 0, i32 0
-  %b = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> zeroinitializer
-  store <8 x i16> %b, ptr %x
+  store <8 x i16> splat (i16 0), ptr %x
   ret void
 }
 
@@ -166,9 +162,7 @@ define void @splat_zero_v4i32(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i32> poison, i32 0, i32 0
-  %b = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
-  store <4 x i32> %b, ptr %x
+  store <4 x i32> splat (i32 0), ptr %x
   ret void
 }
 
@@ -179,9 +173,7 @@ define void @splat_zero_v2i64(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x i64> poison, i64 0, i32 0
-  %b = shufflevector <2 x i64> %a, <2 x i64> poison, <2 x i32> zeroinitializer
-  store <2 x i64> %b, ptr %x
+  store <2 x i64> splat (i64 0), ptr %x
   ret void
 }
 
@@ -193,9 +185,7 @@ define void @splat_zero_v32i8(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <32 x i8> poison, i8 0, i32 0
-  %b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
-  store <32 x i8> %b, ptr %x
+  store <32 x i8> splat (i8 0), ptr %x
   ret void
 }
 
@@ -206,9 +196,7 @@ define void @splat_zero_v16i16(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x i16> poison, i16 0, i32 0
-  %b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
-  store <16 x i16> %b, ptr %x
+  store <16 x i16> splat (i16 0), ptr %x
   ret void
 }
 
@@ -219,9 +207,7 @@ define void @splat_zero_v8i32(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i32> poison, i32 0, i32 0
-  %b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
-  store <8 x i32> %b, ptr %x
+  store <8 x i32> splat (i32 0), ptr %x
   ret void
 }
 
@@ -232,9 +218,7 @@ define void @splat_zero_v4i64(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i64> poison, i64 0, i32 0
-  %b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
-  store <4 x i64> %b, ptr %x
+  store <4 x i64> splat (i64 0), ptr %x
   ret void
 }
 
@@ -311,9 +295,7 @@ define void @splat_allones_v16i8(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x i8> poison, i8 -1, i32 0
-  %b = shufflevector <16 x i8> %a, <16 x i8> poison, <16 x i32> zeroinitializer
-  store <16 x i8> %b, ptr %x
+  store <16 x i8> splat (i8 -1), ptr %x
   ret void
 }
 
@@ -324,9 +306,7 @@ define void @splat_allones_v8i16(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i16> poison, i16 -1, i32 0
-  %b = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> zeroinitializer
-  store <8 x i16> %b, ptr %x
+  store <8 x i16> splat (i16 -1), ptr %x
   ret void
 }
 
@@ -337,9 +317,7 @@ define void @splat_allones_v4i32(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i32> poison, i32 -1, i32 0
-  %b = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
-  store <4 x i32> %b, ptr %x
+  store <4 x i32> splat (i32 -1), ptr %x
   ret void
 }
 
@@ -350,9 +328,7 @@ define void @splat_allones_v2i64(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x i64> poison, i64 -1, i32 0
-  %b = shufflevector <2 x i64> %a, <2 x i64> poison, <2 x i32> zeroinitializer
-  store <2 x i64> %b, ptr %x
+  store <2 x i64> splat (i64 -1), ptr %x
   ret void
 }
 
@@ -364,9 +340,7 @@ define void @splat_allones_v32i8(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <32 x i8> poison, i8 -1, i32 0
-  %b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
-  store <32 x i8> %b, ptr %x
+  store <32 x i8> splat (i8 -1), ptr %x
   ret void
 }
 
@@ -377,9 +351,7 @@ define void @splat_allones_v16i16(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <16 x i16> poison, i16 -1, i32 0
-  %b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
-  store <16 x i16> %b, ptr %x
+  store <16 x i16> splat (i16 -1), ptr %x
   ret void
 }
 
@@ -390,9 +362,7 @@ define void @splat_allones_v8i32(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i32> poison, i32 -1, i32 0
-  %b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
-  store <8 x i32> %b, ptr %x
+  store <8 x i32> splat (i32 -1), ptr %x
   ret void
 }
 
@@ -403,9 +373,7 @@ define void @splat_allones_v4i64(ptr %x) {
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i64> poison, i64 -1, i32 0
-  %b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
-  store <4 x i64> %b, ptr %x
+  store <4 x i64> splat (i64 -1), ptr %x
   ret void
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 175b110538ffba..03e99baf91c08e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -3935,9 +3935,7 @@ define void @add_vi_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 -1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = add <16 x i8> %a, %c
+  %d = add <16 x i8> %a, splat (i8 -1)
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -3951,9 +3949,7 @@ define void @add_vi_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 -1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = add <8 x i16> %a, %c
+  %d = add <8 x i16> %a, splat (i16 -1)
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -3967,9 +3963,7 @@ define void @add_vi_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 -1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = add <4 x i32> %a, %c
+  %d = add <4 x i32> %a, splat (i32 -1)
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -3983,9 +3977,7 @@ define void @add_vi_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 -1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = add <2 x i64> %a, %c
+  %d = add <2 x i64> %a, splat (i64 -1)
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -3999,9 +3991,7 @@ define void @add_iv_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = add <16 x i8> %c, %a
+  %d = add <16 x i8> splat (i8 1), %a
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -4015,9 +4005,7 @@ define void @add_iv_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = add <8 x i16> %c, %a
+  %d = add <8 x i16> splat (i16 1), %a
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -4031,9 +4019,7 @@ define void @add_iv_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = add <4 x i32> %c, %a
+  %d = add <4 x i32> splat (i32 1), %a
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -4047,9 +4033,7 @@ define void @add_iv_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = add <2 x i64> %c, %a
+  %d = add <2 x i64> splat (i64 1), %a
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -4160,9 +4144,7 @@ define void @sub_vi_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 -1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = sub <16 x i8> %a, %c
+  %d = sub <16 x i8> %a, splat (i8 -1)
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -4177,9 +4159,7 @@ define void @sub_vi_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 -1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = sub <8 x i16> %a, %c
+  %d = sub <8 x i16> %a, splat (i16 -1)
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -4194,9 +4174,7 @@ define void @sub_vi_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 -1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = sub <4 x i32> %a, %c
+  %d = sub <4 x i32> %a, splat (i32 -1)
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -4211,9 +4189,7 @@ define void @sub_vi_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 -1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = sub <2 x i64> %a, %c
+  %d = sub <2 x i64> %a, splat (i64 -1)
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -4227,9 +4203,7 @@ define void @sub_iv_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = sub <16 x i8> %c, %a
+  %d = sub <16 x i8> splat (i8 1), %a
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -4243,9 +4217,7 @@ define void @sub_iv_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = sub <8 x i16> %c, %a
+  %d = sub <8 x i16> splat (i16 1), %a
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -4259,9 +4231,7 @@ define void @sub_iv_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = sub <4 x i32> %c, %a
+  %d = sub <4 x i32> splat (i32 1), %a
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -4275,9 +4245,7 @@ define void @sub_iv_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = sub <2 x i64> %c, %a
+  %d = sub <2 x i64> splat (i64 1), %a
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -4483,9 +4451,7 @@ define void @and_vi_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 -2, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = and <16 x i8> %a, %c
+  %d = and <16 x i8> %a, splat (i8 -2)
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -4499,9 +4465,7 @@ define void @and_vi_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 -2, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = and <8 x i16> %a, %c
+  %d = and <8 x i16> %a, splat (i16 -2)
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -4515,9 +4479,7 @@ define void @and_vi_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 -2, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = and <4 x i32> %a, %c
+  %d = and <4 x i32> %a, splat (i32 -2)
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -4531,9 +4493,7 @@ define void @and_vi_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 -2, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = and <2 x i64> %a, %c
+  %d = and <2 x i64> %a, splat (i64 -2)
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -4547,9 +4507,7 @@ define void @and_iv_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = and <16 x i8> %c, %a
+  %d = and <16 x i8> splat (i8 1), %a
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -4563,9 +4521,7 @@ define void @and_iv_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = and <8 x i16> %c, %a
+  %d = and <8 x i16> splat (i16 1), %a
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -4579,9 +4535,7 @@ define void @and_iv_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = and <4 x i32> %c, %a
+  %d = and <4 x i32> splat (i32 1), %a
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -4595,9 +4549,7 @@ define void @and_iv_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = and <2 x i64> %c, %a
+  %d = and <2 x i64> splat (i64 1), %a
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -4707,9 +4659,7 @@ define void @or_vi_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 -2, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = or <16 x i8> %a, %c
+  %d = or <16 x i8> %a, splat (i8 -2)
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -4723,9 +4673,7 @@ define void @or_vi_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 -2, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = or <8 x i16> %a, %c
+  %d = or <8 x i16> %a, splat (i16 -2)
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -4739,9 +4687,7 @@ define void @or_vi_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 -2, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = or <4 x i32> %a, %c
+  %d = or <4 x i32> %a, splat (i32 -2)
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -4755,9 +4701,7 @@ define void @or_vi_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 -2, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = or <2 x i64> %a, %c
+  %d = or <2 x i64> %a, splat (i64 -2)
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -4771,9 +4715,7 @@ define void @or_iv_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = or <16 x i8> %c, %a
+  %d = or <16 x i8> splat (i8 1), %a
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -4787,9 +4729,7 @@ define void @or_iv_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = or <8 x i16> %c, %a
+  %d = or <8 x i16> splat (i16 1), %a
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -4803,9 +4743,7 @@ define void @or_iv_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = or <4 x i32> %c, %a
+  %d = or <4 x i32> splat (i32 1), %a
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -4819,9 +4757,7 @@ define void @or_iv_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = or <2 x i64> %c, %a
+  %d = or <2 x i64> splat (i64 1), %a
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -4931,9 +4867,7 @@ define void @xor_vi_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 -1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = xor <16 x i8> %a, %c
+  %d = xor <16 x i8> %a, splat (i8 -1)
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -4947,9 +4881,7 @@ define void @xor_vi_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 -1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = xor <8 x i16> %a, %c
+  %d = xor <8 x i16> %a, splat (i16 -1)
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -4963,9 +4895,7 @@ define void @xor_vi_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 -1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = xor <4 x i32> %a, %c
+  %d = xor <4 x i32> %a, splat (i32 -1)
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -4979,9 +4909,7 @@ define void @xor_vi_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 -1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = xor <2 x i64> %a, %c
+  %d = xor <2 x i64> %a, splat (i64 -1)
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -4995,9 +4923,7 @@ define void @xor_iv_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 1, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = xor <16 x i8> %c, %a
+  %d = xor <16 x i8> splat (i8 1), %a
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -5011,9 +4937,7 @@ define void @xor_iv_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 1, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = xor <8 x i16> %c, %a
+  %d = xor <8 x i16> splat (i16 1), %a
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -5027,9 +4951,7 @@ define void @xor_iv_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 1, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = xor <4 x i32> %c, %a
+  %d = xor <4 x i32> splat (i32 1), %a
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -5043,9 +4965,7 @@ define void @xor_iv_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 1, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = xor <2 x i64> %c, %a
+  %d = xor <2 x i64> splat (i64 1), %a
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -5155,9 +5075,7 @@ define void @lshr_vi_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 7, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = lshr <16 x i8> %a, %c
+  %d = lshr <16 x i8> %a, splat (i8 7)
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -5171,9 +5089,7 @@ define void @lshr_vi_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 15, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = lshr <8 x i16> %a, %c
+  %d = lshr <8 x i16> %a, splat (i16 15)
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -5187,9 +5103,7 @@ define void @lshr_vi_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 31, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = lshr <4 x i32> %a, %c
+  %d = lshr <4 x i32> %a, splat (i32 31)
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -5203,9 +5117,7 @@ define void @lshr_vi_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 31, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = lshr <2 x i64> %a, %c
+  %d = lshr <2 x i64> %a, splat (i64 31)
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -5267,9 +5179,7 @@ define void @ashr_vi_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 7, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = ashr <16 x i8> %a, %c
+  %d = ashr <16 x i8> %a, splat (i8 7)
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -5283,9 +5193,7 @@ define void @ashr_vi_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 15, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = ashr <8 x i16> %a, %c
+  %d = ashr <8 x i16> %a, splat (i16 15)
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -5299,9 +5207,7 @@ define void @ashr_vi_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 31, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = ashr <4 x i32> %a, %c
+  %d = ashr <4 x i32> %a, splat (i32 31)
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -5315,9 +5221,7 @@ define void @ashr_vi_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 31, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = ashr <2 x i64> %a, %c
+  %d = ashr <2 x i64> %a, splat (i64 31)
   store <2 x i64> %d, ptr %x
   ret void
 }
@@ -5379,9 +5283,7 @@ define void @shl_vi_v16i8(ptr %x) {
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <16 x i8>, ptr %x
-  %b = insertelement <16 x i8> poison, i8 7, i32 0
-  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
-  %d = shl <16 x i8> %a, %c
+  %d = shl <16 x i8> %a, splat (i8 7)
   store <16 x i8> %d, ptr %x
   ret void
 }
@@ -5395,9 +5297,7 @@ define void @shl_vi_v8i16(ptr %x) {
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %x
-  %b = insertelement <8 x i16> poison, i16 15, i32 0
-  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
-  %d = shl <8 x i16> %a, %c
+  %d = shl <8 x i16> %a, splat (i16 15)
   store <8 x i16> %d, ptr %x
   ret void
 }
@@ -5411,9 +5311,7 @@ define void @shl_vi_v4i32(ptr %x) {
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
-  %b = insertelement <4 x i32> poison, i32 31, i32 0
-  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
-  %d = shl <4 x i32> %a, %c
+  %d = shl <4 x i32> %a, splat (i32 31)
   store <4 x i32> %d, ptr %x
   ret void
 }
@@ -5427,9 +5325,7 @@ define void @shl_vi_v2i64(ptr %x) {
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <2 x i64>, ptr %x
-  %b = insertelement <2 x i64> poison, i64 31, i32 0
-  %c = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
-  %d = shl <2 x i64> %a, %c
+  %d = shl <2 x i64> %a, splat (i64 31)
   store <2 x i64> %d, ptr %x
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index a09ab3ee0252a7..9fbc22221f99bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -518,55 +518,20 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4i8:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB9_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB9_6
-; RV64ZVE32F-NEXT:  .LBB9_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB9_7
-; RV64ZVE32F-NEXT:  .LBB9_3: # %else5
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    bnez a1, .LBB9_8
-; RV64ZVE32F-NEXT:  .LBB9_4: # %else8
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB9_5: # %cond.load
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB9_2
-; RV64ZVE32F-NEXT:  .LBB9_6: # %cond.load1
-; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB9_3
-; RV64ZVE32F-NEXT:  .LBB9_7: # %cond.load4
+; RV64ZVE32F-NEXT:    ld a1, 8(a0)
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a3, 24(a0)
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    lbu a1, 0(a1)
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e8, mf4, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    beqz a1, .LBB9_4
-; RV64ZVE32F-NEXT:  .LBB9_8: # %cond.load7
-; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    lbu a0, 0(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    vlse8.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> %mtrue, <4 x i8> %passthru)
+  %v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> splat (i1 1), <4 x i8> %passthru)
   ret <4 x i8> %v
 }
 
@@ -1242,55 +1207,20 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4i16:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB20_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB20_6
-; RV64ZVE32F-NEXT:  .LBB20_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB20_7
-; RV64ZVE32F-NEXT:  .LBB20_3: # %else5
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    bnez a1, .LBB20_8
-; RV64ZVE32F-NEXT:  .LBB20_4: # %else8
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB20_5: # %cond.load
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    lh a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB20_2
-; RV64ZVE32F-NEXT:  .LBB20_6: # %cond.load1
-; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    lh a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB20_3
-; RV64ZVE32F-NEXT:  .LBB20_7: # %cond.load4
+; RV64ZVE32F-NEXT:    ld a1, 8(a0)
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a3, 24(a0)
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    lh a1, 0(a1)
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    beqz a1, .LBB20_4
-; RV64ZVE32F-NEXT:  .LBB20_8: # %cond.load7
-; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    lh a0, 0(a0)
+; RV64ZVE32F-NEXT:    lh a3, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> %mtrue, <4 x i16> %passthru)
+  %v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1), <4 x i16> %passthru)
   ret <4 x i16> %v
 }
 
@@ -2326,55 +2256,20 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4i32:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB32_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB32_6
-; RV64ZVE32F-NEXT:  .LBB32_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB32_7
-; RV64ZVE32F-NEXT:  .LBB32_3: # %else5
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    bnez a1, .LBB32_8
-; RV64ZVE32F-NEXT:  .LBB32_4: # %else8
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB32_5: # %cond.load
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB32_2
-; RV64ZVE32F-NEXT:  .LBB32_6: # %cond.load1
-; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB32_3
-; RV64ZVE32F-NEXT:  .LBB32_7: # %cond.load4
+; RV64ZVE32F-NEXT:    ld a1, 8(a0)
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a3, 24(a0)
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    lw a1, 0(a1)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    beqz a1, .LBB32_4
-; RV64ZVE32F-NEXT:  .LBB32_8: # %cond.load7
-; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    lw a0, 0(a0)
+; RV64ZVE32F-NEXT:    lw a3, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    vlse32.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %mtrue, <4 x i32> %passthru)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1), <4 x i32> %passthru)
   ret <4 x i32> %v
 }
 
@@ -3839,117 +3734,48 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) {
 ;
 ; RV32ZVE32F-LABEL: mgather_truemask_v4i64:
 ; RV32ZVE32F:       # %bb.0:
-; RV32ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT:    vmset.m v9
-; RV32ZVE32F-NEXT:    vmv.x.s a6, v9
-; RV32ZVE32F-NEXT:    bnez zero, .LBB45_5
-; RV32ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT:    vmv.x.s a3, v8
-; RV32ZVE32F-NEXT:    lw a2, 4(a3)
-; RV32ZVE32F-NEXT:    lw a3, 0(a3)
-; RV32ZVE32F-NEXT:    andi a4, a6, 2
-; RV32ZVE32F-NEXT:    bnez a4, .LBB45_6
-; RV32ZVE32F-NEXT:  .LBB45_2:
-; RV32ZVE32F-NEXT:    lw a4, 12(a1)
-; RV32ZVE32F-NEXT:    lw a5, 8(a1)
-; RV32ZVE32F-NEXT:    andi a7, a6, 4
-; RV32ZVE32F-NEXT:    bnez a7, .LBB45_7
-; RV32ZVE32F-NEXT:  .LBB45_3:
-; RV32ZVE32F-NEXT:    lw a7, 20(a1)
-; RV32ZVE32F-NEXT:    lw t0, 16(a1)
-; RV32ZVE32F-NEXT:    andi a6, a6, 8
-; RV32ZVE32F-NEXT:    bnez a6, .LBB45_8
-; RV32ZVE32F-NEXT:  .LBB45_4:
-; RV32ZVE32F-NEXT:    lw a6, 28(a1)
-; RV32ZVE32F-NEXT:    lw a1, 24(a1)
-; RV32ZVE32F-NEXT:    j .LBB45_9
-; RV32ZVE32F-NEXT:  .LBB45_5:
-; RV32ZVE32F-NEXT:    lw a2, 4(a1)
-; RV32ZVE32F-NEXT:    lw a3, 0(a1)
-; RV32ZVE32F-NEXT:    andi a4, a6, 2
-; RV32ZVE32F-NEXT:    beqz a4, .LBB45_2
-; RV32ZVE32F-NEXT:  .LBB45_6: # %cond.load1
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV32ZVE32F-NEXT:    lw a2, 0(a1)
+; RV32ZVE32F-NEXT:    lw a1, 4(a1)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV32ZVE32F-NEXT:    vmv.x.s a5, v9
-; RV32ZVE32F-NEXT:    lw a4, 4(a5)
-; RV32ZVE32F-NEXT:    lw a5, 0(a5)
-; RV32ZVE32F-NEXT:    andi a7, a6, 4
-; RV32ZVE32F-NEXT:    beqz a7, .LBB45_3
-; RV32ZVE32F-NEXT:  .LBB45_7: # %cond.load4
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a3, v9
+; RV32ZVE32F-NEXT:    lw a4, 0(a3)
+; RV32ZVE32F-NEXT:    lw a3, 4(a3)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV32ZVE32F-NEXT:    vmv.x.s t0, v9
-; RV32ZVE32F-NEXT:    lw a7, 4(t0)
-; RV32ZVE32F-NEXT:    lw t0, 0(t0)
-; RV32ZVE32F-NEXT:    andi a6, a6, 8
-; RV32ZVE32F-NEXT:    beqz a6, .LBB45_4
-; RV32ZVE32F-NEXT:  .LBB45_8: # %cond.load7
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a5, v9
+; RV32ZVE32F-NEXT:    lw a6, 0(a5)
+; RV32ZVE32F-NEXT:    lw a5, 4(a5)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
-; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV32ZVE32F-NEXT:    lw a6, 4(a1)
-; RV32ZVE32F-NEXT:    lw a1, 0(a1)
-; RV32ZVE32F-NEXT:  .LBB45_9: # %else8
-; RV32ZVE32F-NEXT:    sw a3, 0(a0)
-; RV32ZVE32F-NEXT:    sw a2, 4(a0)
-; RV32ZVE32F-NEXT:    sw a5, 8(a0)
-; RV32ZVE32F-NEXT:    sw a4, 12(a0)
-; RV32ZVE32F-NEXT:    sw t0, 16(a0)
-; RV32ZVE32F-NEXT:    sw a7, 20(a0)
-; RV32ZVE32F-NEXT:    sw a1, 24(a0)
-; RV32ZVE32F-NEXT:    sw a6, 28(a0)
+; RV32ZVE32F-NEXT:    vmv.x.s a7, v8
+; RV32ZVE32F-NEXT:    lw t0, 4(a7)
+; RV32ZVE32F-NEXT:    lw a7, 0(a7)
+; RV32ZVE32F-NEXT:    sw a1, 4(a0)
+; RV32ZVE32F-NEXT:    sw a2, 0(a0)
+; RV32ZVE32F-NEXT:    sw t0, 28(a0)
+; RV32ZVE32F-NEXT:    sw a7, 24(a0)
+; RV32ZVE32F-NEXT:    sw a5, 20(a0)
+; RV32ZVE32F-NEXT:    sw a6, 16(a0)
+; RV32ZVE32F-NEXT:    sw a3, 12(a0)
+; RV32ZVE32F-NEXT:    sw a4, 8(a0)
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4i64:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a5, v8
-; RV64ZVE32F-NEXT:    bnez zero, .LBB45_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    ld a3, 0(a1)
-; RV64ZVE32F-NEXT:    ld a3, 0(a3)
-; RV64ZVE32F-NEXT:    andi a4, a5, 2
-; RV64ZVE32F-NEXT:    bnez a4, .LBB45_6
-; RV64ZVE32F-NEXT:  .LBB45_2:
-; RV64ZVE32F-NEXT:    ld a4, 8(a2)
-; RV64ZVE32F-NEXT:    andi a6, a5, 4
-; RV64ZVE32F-NEXT:    bnez a6, .LBB45_7
-; RV64ZVE32F-NEXT:  .LBB45_3:
-; RV64ZVE32F-NEXT:    ld a6, 16(a2)
-; RV64ZVE32F-NEXT:    andi a5, a5, 8
-; RV64ZVE32F-NEXT:    bnez a5, .LBB45_8
-; RV64ZVE32F-NEXT:  .LBB45_4:
-; RV64ZVE32F-NEXT:    ld a1, 24(a2)
-; RV64ZVE32F-NEXT:    j .LBB45_9
-; RV64ZVE32F-NEXT:  .LBB45_5:
-; RV64ZVE32F-NEXT:    ld a3, 0(a2)
-; RV64ZVE32F-NEXT:    andi a4, a5, 2
-; RV64ZVE32F-NEXT:    beqz a4, .LBB45_2
-; RV64ZVE32F-NEXT:  .LBB45_6: # %cond.load1
+; RV64ZVE32F-NEXT:    ld a2, 24(a1)
+; RV64ZVE32F-NEXT:    ld a3, 16(a1)
 ; RV64ZVE32F-NEXT:    ld a4, 8(a1)
+; RV64ZVE32F-NEXT:    ld a1, 0(a1)
+; RV64ZVE32F-NEXT:    ld a2, 0(a2)
+; RV64ZVE32F-NEXT:    ld a3, 0(a3)
 ; RV64ZVE32F-NEXT:    ld a4, 0(a4)
-; RV64ZVE32F-NEXT:    andi a6, a5, 4
-; RV64ZVE32F-NEXT:    beqz a6, .LBB45_3
-; RV64ZVE32F-NEXT:  .LBB45_7: # %cond.load4
-; RV64ZVE32F-NEXT:    ld a6, 16(a1)
-; RV64ZVE32F-NEXT:    ld a6, 0(a6)
-; RV64ZVE32F-NEXT:    andi a5, a5, 8
-; RV64ZVE32F-NEXT:    beqz a5, .LBB45_4
-; RV64ZVE32F-NEXT:  .LBB45_8: # %cond.load7
-; RV64ZVE32F-NEXT:    ld a1, 24(a1)
 ; RV64ZVE32F-NEXT:    ld a1, 0(a1)
-; RV64ZVE32F-NEXT:  .LBB45_9: # %else8
-; RV64ZVE32F-NEXT:    sd a3, 0(a0)
+; RV64ZVE32F-NEXT:    sd a2, 24(a0)
+; RV64ZVE32F-NEXT:    sd a3, 16(a0)
 ; RV64ZVE32F-NEXT:    sd a4, 8(a0)
-; RV64ZVE32F-NEXT:    sd a6, 16(a0)
-; RV64ZVE32F-NEXT:    sd a1, 24(a0)
+; RV64ZVE32F-NEXT:    sd a1, 0(a0)
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> %mtrue, <4 x i64> %passthru)
+  %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 1), <4 x i64> %passthru)
   ret <4 x i64> %v
 }
 
@@ -7190,55 +7016,20 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru)
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4f16:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB61_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB61_6
-; RV64ZVE32F-NEXT:  .LBB61_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB61_7
-; RV64ZVE32F-NEXT:  .LBB61_3: # %else5
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    bnez a1, .LBB61_8
-; RV64ZVE32F-NEXT:  .LBB61_4: # %else8
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB61_5: # %cond.load
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB61_2
-; RV64ZVE32F-NEXT:  .LBB61_6: # %cond.load1
-; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB61_3
-; RV64ZVE32F-NEXT:  .LBB61_7: # %cond.load4
+; RV64ZVE32F-NEXT:    ld a1, 8(a0)
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    beqz a1, .LBB61_4
-; RV64ZVE32F-NEXT:  .LBB61_8: # %cond.load7
-; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
+; RV64ZVE32F-NEXT:    ld a3, 24(a0)
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a1)
+; RV64ZVE32F-NEXT:    flh fa4, 0(a2)
+; RV64ZVE32F-NEXT:    flh fa3, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa5
+; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa4
+; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa3
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> %mtrue, <4 x half> %passthru)
+  %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1), <4 x half> %passthru)
   ret <4 x half> %v
 }
 
@@ -8148,55 +7939,20 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4f32:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB71_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB71_6
-; RV64ZVE32F-NEXT:  .LBB71_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB71_7
-; RV64ZVE32F-NEXT:  .LBB71_3: # %else5
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    bnez a1, .LBB71_8
-; RV64ZVE32F-NEXT:  .LBB71_4: # %else8
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB71_5: # %cond.load
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB71_2
-; RV64ZVE32F-NEXT:  .LBB71_6: # %cond.load1
-; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB71_3
-; RV64ZVE32F-NEXT:  .LBB71_7: # %cond.load4
+; RV64ZVE32F-NEXT:    ld a1, 8(a0)
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    beqz a1, .LBB71_4
-; RV64ZVE32F-NEXT:  .LBB71_8: # %cond.load7
-; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
+; RV64ZVE32F-NEXT:    ld a3, 24(a0)
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a1)
+; RV64ZVE32F-NEXT:    flw fa4, 0(a2)
+; RV64ZVE32F-NEXT:    flw fa3, 0(a3)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    vlse32.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa5
+; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa4
+; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa3
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %mtrue, <4 x float> %passthru)
+  %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1), <4 x float> %passthru)
   ret <4 x float> %v
 }
 
@@ -9627,95 +9383,40 @@ define <4 x double> @mgather_truemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passt
 ;
 ; RV32ZVE32F-LABEL: mgather_truemask_v4f64:
 ; RV32ZVE32F:       # %bb.0:
-; RV32ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT:    vmset.m v9
-; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV32ZVE32F-NEXT:    beqz zero, .LBB84_6
-; RV32ZVE32F-NEXT:  # %bb.1: # %else
-; RV32ZVE32F-NEXT:    andi a2, a1, 2
-; RV32ZVE32F-NEXT:    bnez a2, .LBB84_7
-; RV32ZVE32F-NEXT:  .LBB84_2: # %else2
-; RV32ZVE32F-NEXT:    andi a2, a1, 4
-; RV32ZVE32F-NEXT:    bnez a2, .LBB84_8
-; RV32ZVE32F-NEXT:  .LBB84_3: # %else5
-; RV32ZVE32F-NEXT:    andi a1, a1, 8
-; RV32ZVE32F-NEXT:    beqz a1, .LBB84_5
-; RV32ZVE32F-NEXT:  .LBB84_4: # %cond.load7
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV32ZVE32F-NEXT:    fld fa5, 0(a1)
+; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
+; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
+; RV32ZVE32F-NEXT:    fld fa4, 0(a1)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV32ZVE32F-NEXT:    fld fa3, 0(a1)
-; RV32ZVE32F-NEXT:  .LBB84_5: # %else8
-; RV32ZVE32F-NEXT:    fsd fa0, 0(a0)
-; RV32ZVE32F-NEXT:    fsd fa1, 8(a0)
-; RV32ZVE32F-NEXT:    fsd fa2, 16(a0)
+; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV32ZVE32F-NEXT:    fld fa2, 0(a1)
+; RV32ZVE32F-NEXT:    fsd fa5, 0(a0)
 ; RV32ZVE32F-NEXT:    fsd fa3, 24(a0)
+; RV32ZVE32F-NEXT:    fsd fa4, 16(a0)
+; RV32ZVE32F-NEXT:    fsd fa2, 8(a0)
 ; RV32ZVE32F-NEXT:    ret
-; RV32ZVE32F-NEXT:  .LBB84_6: # %cond.load
-; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT:    vmv.x.s a2, v8
-; RV32ZVE32F-NEXT:    fld fa0, 0(a2)
-; RV32ZVE32F-NEXT:    andi a2, a1, 2
-; RV32ZVE32F-NEXT:    beqz a2, .LBB84_2
-; RV32ZVE32F-NEXT:  .LBB84_7: # %cond.load1
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV32ZVE32F-NEXT:    vmv.x.s a2, v9
-; RV32ZVE32F-NEXT:    fld fa1, 0(a2)
-; RV32ZVE32F-NEXT:    andi a2, a1, 4
-; RV32ZVE32F-NEXT:    beqz a2, .LBB84_3
-; RV32ZVE32F-NEXT:  .LBB84_8: # %cond.load4
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV32ZVE32F-NEXT:    vmv.x.s a2, v9
-; RV32ZVE32F-NEXT:    fld fa2, 0(a2)
-; RV32ZVE32F-NEXT:    andi a1, a1, 8
-; RV32ZVE32F-NEXT:    bnez a1, .LBB84_4
-; RV32ZVE32F-NEXT:    j .LBB84_5
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4f64:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB84_6
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a3, a2, 2
-; RV64ZVE32F-NEXT:    bnez a3, .LBB84_7
-; RV64ZVE32F-NEXT:  .LBB84_2: # %else2
-; RV64ZVE32F-NEXT:    andi a3, a2, 4
-; RV64ZVE32F-NEXT:    bnez a3, .LBB84_8
-; RV64ZVE32F-NEXT:  .LBB84_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a2, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB84_5
-; RV64ZVE32F-NEXT:  .LBB84_4: # %cond.load7
-; RV64ZVE32F-NEXT:    ld a1, 24(a1)
-; RV64ZVE32F-NEXT:    fld fa3, 0(a1)
-; RV64ZVE32F-NEXT:  .LBB84_5: # %else8
-; RV64ZVE32F-NEXT:    fsd fa0, 0(a0)
-; RV64ZVE32F-NEXT:    fsd fa1, 8(a0)
-; RV64ZVE32F-NEXT:    fsd fa2, 16(a0)
-; RV64ZVE32F-NEXT:    fsd fa3, 24(a0)
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB84_6: # %cond.load
-; RV64ZVE32F-NEXT:    ld a3, 0(a1)
-; RV64ZVE32F-NEXT:    fld fa0, 0(a3)
-; RV64ZVE32F-NEXT:    andi a3, a2, 2
-; RV64ZVE32F-NEXT:    beqz a3, .LBB84_2
-; RV64ZVE32F-NEXT:  .LBB84_7: # %cond.load1
-; RV64ZVE32F-NEXT:    ld a3, 8(a1)
-; RV64ZVE32F-NEXT:    fld fa1, 0(a3)
-; RV64ZVE32F-NEXT:    andi a3, a2, 4
-; RV64ZVE32F-NEXT:    beqz a3, .LBB84_3
-; RV64ZVE32F-NEXT:  .LBB84_8: # %cond.load4
+; RV64ZVE32F-NEXT:    ld a2, 24(a1)
 ; RV64ZVE32F-NEXT:    ld a3, 16(a1)
-; RV64ZVE32F-NEXT:    fld fa2, 0(a3)
-; RV64ZVE32F-NEXT:    andi a2, a2, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB84_4
-; RV64ZVE32F-NEXT:    j .LBB84_5
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> %mtrue, <4 x double> %passthru)
+; RV64ZVE32F-NEXT:    ld a4, 8(a1)
+; RV64ZVE32F-NEXT:    ld a1, 0(a1)
+; RV64ZVE32F-NEXT:    fld fa5, 0(a2)
+; RV64ZVE32F-NEXT:    fld fa4, 0(a3)
+; RV64ZVE32F-NEXT:    fld fa3, 0(a4)
+; RV64ZVE32F-NEXT:    fld fa2, 0(a1)
+; RV64ZVE32F-NEXT:    fsd fa5, 24(a0)
+; RV64ZVE32F-NEXT:    fsd fa4, 16(a0)
+; RV64ZVE32F-NEXT:    fsd fa3, 8(a0)
+; RV64ZVE32F-NEXT:    fsd fa2, 0(a0)
+; RV64ZVE32F-NEXT:    ret
+  %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 1), <4 x double> %passthru)
   ret <4 x double> %v
 }
 
@@ -12850,10 +12551,8 @@ define <4 x i32> @mgather_broadcast_load_unmasked(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), zero
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12864,11 +12563,9 @@ define <4 x i32> @mgather_broadcast_load_unmasked2(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), zero
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrhead = insertelement <4 x ptr> poison, ptr %base, i32 0
   %ptrs = shufflevector <4 x ptr> %ptrhead, <4 x ptr> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12878,8 +12575,6 @@ define <4 x i32> @mgather_broadcast_load_masked(ptr %base, <4 x i1> %m) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), zero, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %m, <4 x i32> poison)
   ret <4 x i32> %v
@@ -12891,10 +12586,8 @@ define <4 x i32> @mgather_unit_stride_load(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32>  <i32 0, i32 1, i32 2, i32 3>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12905,10 +12598,8 @@ define <4 x i32> @mgather_unit_stride_load_with_offset(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32>  <i32 4, i32 5, i32 6, i32 7>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12918,10 +12609,8 @@ define <4 x i32> @mgather_unit_stride_load_narrow_idx(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i8>  <i8 0, i8 1, i8 2, i8 3>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12931,10 +12620,8 @@ define <4 x i32> @mgather_unit_stride_load_wide_idx(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i128>  <i128 0, i128 1, i128 2, i128 3>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12965,51 +12652,15 @@ define <4 x i32> @mgather_narrow_edge_case(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_narrow_edge_case:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB106_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB106_6
-; RV64ZVE32F-NEXT:  .LBB106_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB106_7
-; RV64ZVE32F-NEXT:  .LBB106_3: # %else5
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    bnez a1, .LBB106_8
-; RV64ZVE32F-NEXT:  .LBB106_4: # %else8
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB106_5: # %cond.load
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vlse32.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB106_2
-; RV64ZVE32F-NEXT:  .LBB106_6: # %cond.load1
-; RV64ZVE32F-NEXT:    lw a2, -512(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB106_3
-; RV64ZVE32F-NEXT:  .LBB106_7: # %cond.load4
-; RV64ZVE32F-NEXT:    lw a2, 0(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a1, a1, 8
-; RV64ZVE32F-NEXT:    beqz a1, .LBB106_4
-; RV64ZVE32F-NEXT:  .LBB106_8: # %cond.load7
-; RV64ZVE32F-NEXT:    lw a0, -512(a0)
+; RV64ZVE32F-NEXT:    addi a1, a0, -512
+; RV64ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    vlse32.v v8, (a1), zero
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 5
+; RV64ZVE32F-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i8>  <i8 0, i8 128, i8 0, i8 128>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -13017,404 +12668,198 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
 ; RV32-LABEL: mgather_strided_unaligned:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmset.m v8
-; RV32-NEXT:    vid.v v10
-; RV32-NEXT:    vsll.vi v10, v10, 2
-; RV32-NEXT:    vadd.vx v10, v10, a0
-; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; RV32-NEXT:    vid.v v8
+; RV32-NEXT:    vsll.vi v8, v8, 2
+; RV32-NEXT:    vadd.vx v8, v8, a0
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    # implicit-def: $v8
-; RV32-NEXT:    beqz zero, .LBB107_9
-; RV32-NEXT:  # %bb.1: # %else
-; RV32-NEXT:    andi a1, a0, 2
-; RV32-NEXT:    bnez a1, .LBB107_10
-; RV32-NEXT:  .LBB107_2: # %else2
-; RV32-NEXT:    andi a1, a0, 4
-; RV32-NEXT:    bnez a1, .LBB107_11
-; RV32-NEXT:  .LBB107_3: # %else5
-; RV32-NEXT:    andi a1, a0, 8
-; RV32-NEXT:    bnez a1, .LBB107_12
-; RV32-NEXT:  .LBB107_4: # %else8
-; RV32-NEXT:    andi a1, a0, 16
-; RV32-NEXT:    bnez a1, .LBB107_13
-; RV32-NEXT:  .LBB107_5: # %else11
-; RV32-NEXT:    andi a1, a0, 32
-; RV32-NEXT:    bnez a1, .LBB107_14
-; RV32-NEXT:  .LBB107_6: # %else14
-; RV32-NEXT:    andi a1, a0, 64
-; RV32-NEXT:    bnez a1, .LBB107_15
-; RV32-NEXT:  .LBB107_7: # %else17
-; RV32-NEXT:    andi a0, a0, -128
-; RV32-NEXT:    bnez a0, .LBB107_16
-; RV32-NEXT:  .LBB107_8: # %else20
-; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB107_9: # %cond.load
-; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32-NEXT:    vmv.x.s a1, v10
-; RV32-NEXT:    lbu a2, 1(a1)
-; RV32-NEXT:    lbu a1, 0(a1)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a1, a2, a1
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a1
-; RV32-NEXT:    andi a1, a0, 2
-; RV32-NEXT:    beqz a1, .LBB107_2
-; RV32-NEXT:  .LBB107_10: # %cond.load1
+; RV32-NEXT:    lbu a1, 0(a0)
+; RV32-NEXT:    lbu a0, 1(a0)
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v10, 1
-; RV32-NEXT:    vmv.x.s a1, v9
-; RV32-NEXT:    lbu a2, 1(a1)
-; RV32-NEXT:    lbu a1, 0(a1)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a1, a2, a1
-; RV32-NEXT:    vmv.s.x v9, a1
-; RV32-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 1
-; RV32-NEXT:    andi a1, a0, 4
-; RV32-NEXT:    beqz a1, .LBB107_3
-; RV32-NEXT:  .LBB107_11: # %cond.load4
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v10, 2
-; RV32-NEXT:    vmv.x.s a1, v9
-; RV32-NEXT:    lbu a2, 1(a1)
-; RV32-NEXT:    lbu a1, 0(a1)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a1, a2, a1
-; RV32-NEXT:    vmv.s.x v9, a1
-; RV32-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 2
-; RV32-NEXT:    andi a1, a0, 8
-; RV32-NEXT:    beqz a1, .LBB107_4
-; RV32-NEXT:  .LBB107_12: # %cond.load7
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v10, 3
-; RV32-NEXT:    vmv.x.s a1, v9
-; RV32-NEXT:    lbu a2, 1(a1)
-; RV32-NEXT:    lbu a1, 0(a1)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a1, a2, a1
-; RV32-NEXT:    vmv.s.x v9, a1
-; RV32-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 3
-; RV32-NEXT:    andi a1, a0, 16
-; RV32-NEXT:    beqz a1, .LBB107_5
-; RV32-NEXT:  .LBB107_13: # %cond.load10
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v12, v10, 4
-; RV32-NEXT:    vmv.x.s a1, v12
-; RV32-NEXT:    lbu a2, 1(a1)
-; RV32-NEXT:    lbu a1, 0(a1)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a1, a2, a1
-; RV32-NEXT:    vmv.s.x v9, a1
-; RV32-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 4
-; RV32-NEXT:    andi a1, a0, 32
-; RV32-NEXT:    beqz a1, .LBB107_6
-; RV32-NEXT:  .LBB107_14: # %cond.load13
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v12, v10, 5
-; RV32-NEXT:    vmv.x.s a1, v12
-; RV32-NEXT:    lbu a2, 1(a1)
-; RV32-NEXT:    lbu a1, 0(a1)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a1, a2, a1
-; RV32-NEXT:    vmv.s.x v9, a1
-; RV32-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 5
-; RV32-NEXT:    andi a1, a0, 64
-; RV32-NEXT:    beqz a1, .LBB107_7
-; RV32-NEXT:  .LBB107_15: # %cond.load16
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v12, v10, 6
-; RV32-NEXT:    vmv.x.s a1, v12
-; RV32-NEXT:    lbu a2, 1(a1)
-; RV32-NEXT:    lbu a1, 0(a1)
-; RV32-NEXT:    slli a2, a2, 8
-; RV32-NEXT:    or a1, a2, a1
-; RV32-NEXT:    vmv.s.x v9, a1
-; RV32-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 6
-; RV32-NEXT:    andi a0, a0, -128
-; RV32-NEXT:    beqz a0, .LBB107_8
-; RV32-NEXT:  .LBB107_16: # %cond.load19
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v10, v10, 7
-; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    lbu a1, 1(a0)
-; RV32-NEXT:    lbu a0, 0(a0)
+; RV32-NEXT:    vslidedown.vi v10, v8, 1
+; RV32-NEXT:    vmv.x.s a2, v10
+; RV32-NEXT:    lbu a3, 1(a2)
+; RV32-NEXT:    lbu a2, 0(a2)
+; RV32-NEXT:    slli a0, a0, 8
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a2, a3, a2
+; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    vmv.x.s a1, v10
+; RV32-NEXT:    lbu a3, 0(a1)
+; RV32-NEXT:    lbu a1, 1(a1)
+; RV32-NEXT:    vslidedown.vi v10, v8, 3
+; RV32-NEXT:    vmv.x.s a4, v10
+; RV32-NEXT:    lbu a5, 1(a4)
+; RV32-NEXT:    lbu a4, 0(a4)
 ; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a0, a1, a0
-; RV32-NEXT:    vmv.s.x v9, a0
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 7
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    slli a5, a5, 8
+; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v8, 4
+; RV32-NEXT:    vmv.x.s a3, v10
+; RV32-NEXT:    lbu a5, 0(a3)
+; RV32-NEXT:    lbu a3, 1(a3)
+; RV32-NEXT:    vslidedown.vi v10, v8, 5
+; RV32-NEXT:    vmv.x.s a6, v10
+; RV32-NEXT:    lbu a7, 1(a6)
+; RV32-NEXT:    lbu a6, 0(a6)
+; RV32-NEXT:    slli a3, a3, 8
+; RV32-NEXT:    or a3, a3, a5
+; RV32-NEXT:    slli a7, a7, 8
+; RV32-NEXT:    or a5, a7, a6
+; RV32-NEXT:    vslidedown.vi v10, v8, 6
+; RV32-NEXT:    vmv.x.s a6, v10
+; RV32-NEXT:    lbu a7, 0(a6)
+; RV32-NEXT:    lbu a6, 1(a6)
+; RV32-NEXT:    vslidedown.vi v8, v8, 7
+; RV32-NEXT:    vmv.x.s t0, v8
+; RV32-NEXT:    lbu t1, 1(t0)
+; RV32-NEXT:    lbu t0, 0(t0)
+; RV32-NEXT:    slli a6, a6, 8
+; RV32-NEXT:    or a6, a6, a7
+; RV32-NEXT:    slli t1, t1, 8
+; RV32-NEXT:    or a7, t1, t0
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vslide1down.vx v8, v8, a2
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    vslide1down.vx v9, v8, a4
+; RV32-NEXT:    vmv.v.x v8, a3
+; RV32-NEXT:    vslide1down.vx v8, v8, a5
+; RV32-NEXT:    vslide1down.vx v8, v8, a6
+; RV32-NEXT:    vmv.v.i v0, 15
+; RV32-NEXT:    vslide1down.vx v8, v8, a7
+; RV32-NEXT:    vslidedown.vi v8, v9, 4, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64V-LABEL: mgather_strided_unaligned:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64V-NEXT:    vmset.m v8
-; RV64V-NEXT:    vid.v v12
-; RV64V-NEXT:    vsll.vi v12, v12, 2
-; RV64V-NEXT:    vadd.vx v12, v12, a0
-; RV64V-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; RV64V-NEXT:    vmv.x.s a0, v8
-; RV64V-NEXT:    # implicit-def: $v8
-; RV64V-NEXT:    beqz zero, .LBB107_11
-; RV64V-NEXT:  # %bb.1: # %else
-; RV64V-NEXT:    andi a1, a0, 2
-; RV64V-NEXT:    bnez a1, .LBB107_12
-; RV64V-NEXT:  .LBB107_2: # %else2
-; RV64V-NEXT:    andi a1, a0, 4
-; RV64V-NEXT:    bnez a1, .LBB107_13
-; RV64V-NEXT:  .LBB107_3: # %else5
-; RV64V-NEXT:    andi a1, a0, 8
-; RV64V-NEXT:    beqz a1, .LBB107_5
-; RV64V-NEXT:  .LBB107_4: # %cond.load7
-; RV64V-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64V-NEXT:    vslidedown.vi v10, v12, 3
-; RV64V-NEXT:    vmv.x.s a1, v10
-; RV64V-NEXT:    lbu a2, 1(a1)
-; RV64V-NEXT:    lbu a1, 0(a1)
-; RV64V-NEXT:    slli a2, a2, 8
-; RV64V-NEXT:    or a1, a2, a1
-; RV64V-NEXT:    vmv.s.x v9, a1
-; RV64V-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64V-NEXT:    vslideup.vi v8, v9, 3
-; RV64V-NEXT:  .LBB107_5: # %else8
-; RV64V-NEXT:    addi sp, sp, -320
-; RV64V-NEXT:    .cfi_def_cfa_offset 320
-; RV64V-NEXT:    sd ra, 312(sp) # 8-byte Folded Spill
-; RV64V-NEXT:    sd s0, 304(sp) # 8-byte Folded Spill
+; RV64V-NEXT:    addi sp, sp, -128
+; RV64V-NEXT:    .cfi_def_cfa_offset 128
+; RV64V-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64V-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
 ; RV64V-NEXT:    .cfi_offset ra, -8
 ; RV64V-NEXT:    .cfi_offset s0, -16
-; RV64V-NEXT:    addi s0, sp, 320
+; RV64V-NEXT:    addi s0, sp, 128
 ; RV64V-NEXT:    .cfi_def_cfa s0, 0
 ; RV64V-NEXT:    andi sp, sp, -64
-; RV64V-NEXT:    andi a1, a0, 16
-; RV64V-NEXT:    bnez a1, .LBB107_14
-; RV64V-NEXT:  # %bb.6: # %else11
-; RV64V-NEXT:    andi a1, a0, 32
-; RV64V-NEXT:    bnez a1, .LBB107_15
-; RV64V-NEXT:  .LBB107_7: # %else14
-; RV64V-NEXT:    andi a1, a0, 64
-; RV64V-NEXT:    bnez a1, .LBB107_16
-; RV64V-NEXT:  .LBB107_8: # %else17
-; RV64V-NEXT:    andi a0, a0, -128
-; RV64V-NEXT:    beqz a0, .LBB107_10
-; RV64V-NEXT:  .LBB107_9: # %cond.load19
-; RV64V-NEXT:    mv a0, sp
 ; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64V-NEXT:    vse64.v v12, (a0)
-; RV64V-NEXT:    ld a0, 56(sp)
-; RV64V-NEXT:    lbu a1, 1(a0)
-; RV64V-NEXT:    lbu a0, 0(a0)
-; RV64V-NEXT:    slli a1, a1, 8
-; RV64V-NEXT:    or a0, a1, a0
-; RV64V-NEXT:    vmv.s.x v9, a0
-; RV64V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64V-NEXT:    vslideup.vi v8, v9, 7
-; RV64V-NEXT:  .LBB107_10: # %else20
-; RV64V-NEXT:    addi sp, s0, -320
-; RV64V-NEXT:    ld ra, 312(sp) # 8-byte Folded Reload
-; RV64V-NEXT:    ld s0, 304(sp) # 8-byte Folded Reload
-; RV64V-NEXT:    addi sp, sp, 320
-; RV64V-NEXT:    ret
-; RV64V-NEXT:  .LBB107_11: # %cond.load
-; RV64V-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV64V-NEXT:    vmv.x.s a1, v12
-; RV64V-NEXT:    lbu a2, 1(a1)
-; RV64V-NEXT:    lbu a1, 0(a1)
-; RV64V-NEXT:    slli a2, a2, 8
-; RV64V-NEXT:    or a1, a2, a1
-; RV64V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64V-NEXT:    vmv.v.x v8, a1
-; RV64V-NEXT:    andi a1, a0, 2
-; RV64V-NEXT:    beqz a1, .LBB107_2
-; RV64V-NEXT:  .LBB107_12: # %cond.load1
+; RV64V-NEXT:    vid.v v8
+; RV64V-NEXT:    vsll.vi v8, v8, 2
+; RV64V-NEXT:    vadd.vx v8, v8, a0
+; RV64V-NEXT:    vmv.x.s a0, v8
+; RV64V-NEXT:    lbu a1, 0(a0)
+; RV64V-NEXT:    lbu a0, 1(a0)
 ; RV64V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64V-NEXT:    vslidedown.vi v9, v12, 1
-; RV64V-NEXT:    vmv.x.s a1, v9
-; RV64V-NEXT:    lbu a2, 1(a1)
-; RV64V-NEXT:    lbu a1, 0(a1)
-; RV64V-NEXT:    slli a2, a2, 8
-; RV64V-NEXT:    or a1, a2, a1
-; RV64V-NEXT:    vmv.s.x v9, a1
-; RV64V-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64V-NEXT:    vslideup.vi v8, v9, 1
-; RV64V-NEXT:    andi a1, a0, 4
-; RV64V-NEXT:    beqz a1, .LBB107_3
-; RV64V-NEXT:  .LBB107_13: # %cond.load4
+; RV64V-NEXT:    vslidedown.vi v12, v8, 1
+; RV64V-NEXT:    vmv.x.s a2, v12
+; RV64V-NEXT:    lbu a3, 1(a2)
+; RV64V-NEXT:    lbu a2, 0(a2)
+; RV64V-NEXT:    slli a0, a0, 8
+; RV64V-NEXT:    or a0, a0, a1
+; RV64V-NEXT:    slli a1, a3, 8
+; RV64V-NEXT:    or a1, a1, a2
 ; RV64V-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64V-NEXT:    vslidedown.vi v10, v12, 2
-; RV64V-NEXT:    vmv.x.s a1, v10
-; RV64V-NEXT:    lbu a2, 1(a1)
-; RV64V-NEXT:    lbu a1, 0(a1)
-; RV64V-NEXT:    slli a2, a2, 8
-; RV64V-NEXT:    or a1, a2, a1
-; RV64V-NEXT:    vmv.s.x v9, a1
-; RV64V-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64V-NEXT:    vslideup.vi v8, v9, 2
-; RV64V-NEXT:    andi a1, a0, 8
-; RV64V-NEXT:    bnez a1, .LBB107_4
-; RV64V-NEXT:    j .LBB107_5
-; RV64V-NEXT:  .LBB107_14: # %cond.load10
-; RV64V-NEXT:    addi a1, sp, 192
-; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64V-NEXT:    vse64.v v12, (a1)
-; RV64V-NEXT:    ld a1, 224(sp)
-; RV64V-NEXT:    lbu a2, 1(a1)
-; RV64V-NEXT:    lbu a1, 0(a1)
-; RV64V-NEXT:    slli a2, a2, 8
-; RV64V-NEXT:    or a1, a2, a1
-; RV64V-NEXT:    vmv.s.x v9, a1
-; RV64V-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64V-NEXT:    vslideup.vi v8, v9, 4
-; RV64V-NEXT:    andi a1, a0, 32
-; RV64V-NEXT:    beqz a1, .LBB107_7
-; RV64V-NEXT:  .LBB107_15: # %cond.load13
-; RV64V-NEXT:    addi a1, sp, 128
-; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64V-NEXT:    vse64.v v12, (a1)
-; RV64V-NEXT:    ld a1, 168(sp)
-; RV64V-NEXT:    lbu a2, 1(a1)
-; RV64V-NEXT:    lbu a1, 0(a1)
-; RV64V-NEXT:    slli a2, a2, 8
-; RV64V-NEXT:    or a1, a2, a1
-; RV64V-NEXT:    vmv.s.x v9, a1
-; RV64V-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64V-NEXT:    vslideup.vi v8, v9, 5
-; RV64V-NEXT:    andi a1, a0, 64
-; RV64V-NEXT:    beqz a1, .LBB107_8
-; RV64V-NEXT:  .LBB107_16: # %cond.load16
-; RV64V-NEXT:    addi a1, sp, 64
-; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64V-NEXT:    vse64.v v12, (a1)
-; RV64V-NEXT:    ld a1, 112(sp)
-; RV64V-NEXT:    lbu a2, 1(a1)
-; RV64V-NEXT:    lbu a1, 0(a1)
+; RV64V-NEXT:    vslidedown.vi v12, v8, 2
+; RV64V-NEXT:    vmv.x.s a2, v12
+; RV64V-NEXT:    lbu a3, 0(a2)
+; RV64V-NEXT:    lbu a2, 1(a2)
+; RV64V-NEXT:    vslidedown.vi v12, v8, 3
+; RV64V-NEXT:    vmv.x.s a4, v12
+; RV64V-NEXT:    lbu a5, 0(a4)
+; RV64V-NEXT:    lbu a4, 1(a4)
+; RV64V-NEXT:    mv a6, sp
+; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64V-NEXT:    vse64.v v8, (a6)
+; RV64V-NEXT:    ld a6, 32(sp)
 ; RV64V-NEXT:    slli a2, a2, 8
-; RV64V-NEXT:    or a1, a2, a1
-; RV64V-NEXT:    vmv.s.x v9, a1
-; RV64V-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64V-NEXT:    vslideup.vi v8, v9, 6
-; RV64V-NEXT:    andi a0, a0, -128
-; RV64V-NEXT:    bnez a0, .LBB107_9
-; RV64V-NEXT:    j .LBB107_10
+; RV64V-NEXT:    or a2, a2, a3
+; RV64V-NEXT:    slli a4, a4, 8
+; RV64V-NEXT:    lbu a3, 1(a6)
+; RV64V-NEXT:    ld a7, 40(sp)
+; RV64V-NEXT:    lbu a6, 0(a6)
+; RV64V-NEXT:    or a4, a4, a5
+; RV64V-NEXT:    slli a3, a3, 8
+; RV64V-NEXT:    lbu a5, 1(a7)
+; RV64V-NEXT:    or a3, a3, a6
+; RV64V-NEXT:    lbu a6, 0(a7)
+; RV64V-NEXT:    ld a7, 48(sp)
+; RV64V-NEXT:    slli a5, a5, 8
+; RV64V-NEXT:    ld t0, 56(sp)
+; RV64V-NEXT:    or a5, a5, a6
+; RV64V-NEXT:    lbu a6, 1(a7)
+; RV64V-NEXT:    lbu a7, 0(a7)
+; RV64V-NEXT:    lbu t1, 1(t0)
+; RV64V-NEXT:    lbu t0, 0(t0)
+; RV64V-NEXT:    slli a6, a6, 8
+; RV64V-NEXT:    or a6, a6, a7
+; RV64V-NEXT:    slli t1, t1, 8
+; RV64V-NEXT:    or a7, t1, t0
+; RV64V-NEXT:    vmv.v.x v8, a0
+; RV64V-NEXT:    vslide1down.vx v8, v8, a1
+; RV64V-NEXT:    vslide1down.vx v8, v8, a2
+; RV64V-NEXT:    vslide1down.vx v9, v8, a4
+; RV64V-NEXT:    vmv.v.x v8, a3
+; RV64V-NEXT:    vslide1down.vx v8, v8, a5
+; RV64V-NEXT:    vslide1down.vx v8, v8, a6
+; RV64V-NEXT:    vmv.v.i v0, 15
+; RV64V-NEXT:    vslide1down.vx v8, v8, a7
+; RV64V-NEXT:    vslidedown.vi v8, v9, 4, v0.t
+; RV64V-NEXT:    addi sp, s0, -128
+; RV64V-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64V-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64V-NEXT:    addi sp, sp, 128
+; RV64V-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_unaligned:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB107_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_10
-; RV64ZVE32F-NEXT:  .LBB107_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_11
-; RV64ZVE32F-NEXT:  .LBB107_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_12
-; RV64ZVE32F-NEXT:  .LBB107_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_13
-; RV64ZVE32F-NEXT:  .LBB107_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_14
-; RV64ZVE32F-NEXT:  .LBB107_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_15
-; RV64ZVE32F-NEXT:  .LBB107_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB107_16
-; RV64ZVE32F-NEXT:  .LBB107_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB107_9: # %cond.load
-; RV64ZVE32F-NEXT:    lbu a2, 1(a0)
-; RV64ZVE32F-NEXT:    lbu a3, 0(a0)
-; RV64ZVE32F-NEXT:    slli a2, a2, 8
-; RV64ZVE32F-NEXT:    or a2, a2, a3
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.v.x v8, a2
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_2
-; RV64ZVE32F-NEXT:  .LBB107_10: # %cond.load1
-; RV64ZVE32F-NEXT:    lbu a2, 5(a0)
-; RV64ZVE32F-NEXT:    lbu a3, 4(a0)
-; RV64ZVE32F-NEXT:    slli a2, a2, 8
-; RV64ZVE32F-NEXT:    or a2, a2, a3
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_3
-; RV64ZVE32F-NEXT:  .LBB107_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lbu a1, 1(a0)
+; RV64ZVE32F-NEXT:    lbu a2, 0(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 5(a0)
+; RV64ZVE32F-NEXT:    lbu a4, 4(a0)
+; RV64ZVE32F-NEXT:    slli a1, a1, 8
+; RV64ZVE32F-NEXT:    or a1, a1, a2
+; RV64ZVE32F-NEXT:    slli a3, a3, 8
+; RV64ZVE32F-NEXT:    or a3, a3, a4
 ; RV64ZVE32F-NEXT:    lbu a2, 9(a0)
-; RV64ZVE32F-NEXT:    lbu a3, 8(a0)
-; RV64ZVE32F-NEXT:    slli a2, a2, 8
-; RV64ZVE32F-NEXT:    or a2, a2, a3
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_4
-; RV64ZVE32F-NEXT:  .LBB107_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lbu a2, 13(a0)
-; RV64ZVE32F-NEXT:    lbu a3, 12(a0)
-; RV64ZVE32F-NEXT:    slli a2, a2, 8
-; RV64ZVE32F-NEXT:    or a2, a2, a3
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_5
-; RV64ZVE32F-NEXT:  .LBB107_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lbu a2, 17(a0)
-; RV64ZVE32F-NEXT:    lbu a3, 16(a0)
-; RV64ZVE32F-NEXT:    slli a2, a2, 8
-; RV64ZVE32F-NEXT:    or a2, a2, a3
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_6
-; RV64ZVE32F-NEXT:  .LBB107_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lbu a2, 21(a0)
-; RV64ZVE32F-NEXT:    lbu a3, 20(a0)
-; RV64ZVE32F-NEXT:    slli a2, a2, 8
-; RV64ZVE32F-NEXT:    or a2, a2, a3
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_7
-; RV64ZVE32F-NEXT:  .LBB107_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lbu a2, 25(a0)
-; RV64ZVE32F-NEXT:    lbu a3, 24(a0)
+; RV64ZVE32F-NEXT:    lbu a4, 8(a0)
+; RV64ZVE32F-NEXT:    lbu a5, 13(a0)
+; RV64ZVE32F-NEXT:    lbu a6, 12(a0)
 ; RV64ZVE32F-NEXT:    slli a2, a2, 8
-; RV64ZVE32F-NEXT:    or a2, a2, a3
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB107_8
-; RV64ZVE32F-NEXT:  .LBB107_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lbu a1, 29(a0)
+; RV64ZVE32F-NEXT:    or a2, a2, a4
+; RV64ZVE32F-NEXT:    slli a5, a5, 8
+; RV64ZVE32F-NEXT:    or a4, a5, a6
+; RV64ZVE32F-NEXT:    lbu a5, 17(a0)
+; RV64ZVE32F-NEXT:    lbu a6, 16(a0)
+; RV64ZVE32F-NEXT:    lbu a7, 21(a0)
+; RV64ZVE32F-NEXT:    lbu t0, 20(a0)
+; RV64ZVE32F-NEXT:    slli a5, a5, 8
+; RV64ZVE32F-NEXT:    or a5, a5, a6
+; RV64ZVE32F-NEXT:    slli a7, a7, 8
+; RV64ZVE32F-NEXT:    or a6, a7, t0
+; RV64ZVE32F-NEXT:    lbu a7, 25(a0)
+; RV64ZVE32F-NEXT:    lbu t0, 24(a0)
+; RV64ZVE32F-NEXT:    lbu t1, 29(a0)
 ; RV64ZVE32F-NEXT:    lbu a0, 28(a0)
-; RV64ZVE32F-NEXT:    slli a1, a1, 8
-; RV64ZVE32F-NEXT:    or a0, a1, a0
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    slli a7, a7, 8
+; RV64ZVE32F-NEXT:    or a7, a7, t0
+; RV64ZVE32F-NEXT:    slli t1, t1, 8
+; RV64ZVE32F-NEXT:    or a0, t1, a0
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vmv.v.x v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v9, v8, a4
+; RV64ZVE32F-NEXT:    vmv.v.x v8, a5
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a7
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a0
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v9, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13436,91 +12881,27 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_2xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB108_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB108_10
-; RV64ZVE32F-NEXT:  .LBB108_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB108_11
-; RV64ZVE32F-NEXT:  .LBB108_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB108_12
-; RV64ZVE32F-NEXT:  .LBB108_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB108_13
-; RV64ZVE32F-NEXT:  .LBB108_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB108_14
-; RV64ZVE32F-NEXT:  .LBB108_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB108_15
-; RV64ZVE32F-NEXT:  .LBB108_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB108_16
-; RV64ZVE32F-NEXT:  .LBB108_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB108_9: # %cond.load
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB108_2
-; RV64ZVE32F-NEXT:  .LBB108_10: # %cond.load1
-; RV64ZVE32F-NEXT:    lh a2, 2(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB108_3
-; RV64ZVE32F-NEXT:  .LBB108_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a1, 2(a0)
 ; RV64ZVE32F-NEXT:    lh a2, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB108_4
-; RV64ZVE32F-NEXT:  .LBB108_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 10(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB108_5
-; RV64ZVE32F-NEXT:  .LBB108_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 16(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB108_6
-; RV64ZVE32F-NEXT:  .LBB108_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 18(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB108_7
-; RV64ZVE32F-NEXT:  .LBB108_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 24(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB108_8
-; RV64ZVE32F-NEXT:  .LBB108_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 26(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 10(a0)
+; RV64ZVE32F-NEXT:    lh a4, 18(a0)
+; RV64ZVE32F-NEXT:    lh a5, 24(a0)
+; RV64ZVE32F-NEXT:    lh a6, 26(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 16
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13544,92 +12925,28 @@ define <8 x i16> @mgather_strided_2xSEW_with_offset(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_2xSEW_with_offset:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB109_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB109_10
-; RV64ZVE32F-NEXT:  .LBB109_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB109_11
-; RV64ZVE32F-NEXT:  .LBB109_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB109_12
-; RV64ZVE32F-NEXT:  .LBB109_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB109_13
-; RV64ZVE32F-NEXT:  .LBB109_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB109_14
-; RV64ZVE32F-NEXT:  .LBB109_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB109_15
-; RV64ZVE32F-NEXT:  .LBB109_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB109_16
-; RV64ZVE32F-NEXT:  .LBB109_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB109_9: # %cond.load
-; RV64ZVE32F-NEXT:    addi a2, a0, 4
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB109_2
-; RV64ZVE32F-NEXT:  .LBB109_10: # %cond.load1
+; RV64ZVE32F-NEXT:    addi a1, a0, 4
 ; RV64ZVE32F-NEXT:    lh a2, 6(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB109_3
-; RV64ZVE32F-NEXT:  .LBB109_11: # %cond.load4
-; RV64ZVE32F-NEXT:    lh a2, 12(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB109_4
-; RV64ZVE32F-NEXT:  .LBB109_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 14(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB109_5
-; RV64ZVE32F-NEXT:  .LBB109_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 20(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB109_6
-; RV64ZVE32F-NEXT:  .LBB109_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 22(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB109_7
-; RV64ZVE32F-NEXT:  .LBB109_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 28(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB109_8
-; RV64ZVE32F-NEXT:  .LBB109_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 30(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 12(a0)
+; RV64ZVE32F-NEXT:    lh a4, 14(a0)
+; RV64ZVE32F-NEXT:    lh a5, 22(a0)
+; RV64ZVE32F-NEXT:    lh a6, 28(a0)
+; RV64ZVE32F-NEXT:    lh a7, 30(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a1), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 20
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a5
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a7
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 2, i64 3, i64 6, i64 7, i64 10, i64 11, i64 14, i64 15>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13653,92 +12970,28 @@ define <8 x i16> @mgather_reverse_unit_strided_2xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_reverse_unit_strided_2xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB110_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB110_10
-; RV64ZVE32F-NEXT:  .LBB110_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB110_11
-; RV64ZVE32F-NEXT:  .LBB110_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB110_12
-; RV64ZVE32F-NEXT:  .LBB110_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB110_13
-; RV64ZVE32F-NEXT:  .LBB110_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB110_14
-; RV64ZVE32F-NEXT:  .LBB110_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB110_15
-; RV64ZVE32F-NEXT:  .LBB110_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB110_16
-; RV64ZVE32F-NEXT:  .LBB110_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB110_9: # %cond.load
-; RV64ZVE32F-NEXT:    addi a2, a0, 28
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB110_2
-; RV64ZVE32F-NEXT:  .LBB110_10: # %cond.load1
+; RV64ZVE32F-NEXT:    addi a1, a0, 28
 ; RV64ZVE32F-NEXT:    lh a2, 30(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB110_3
-; RV64ZVE32F-NEXT:  .LBB110_11: # %cond.load4
-; RV64ZVE32F-NEXT:    lh a2, 24(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB110_4
-; RV64ZVE32F-NEXT:  .LBB110_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 26(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB110_5
-; RV64ZVE32F-NEXT:  .LBB110_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 20(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB110_6
-; RV64ZVE32F-NEXT:  .LBB110_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 22(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB110_7
-; RV64ZVE32F-NEXT:  .LBB110_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 16(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB110_8
-; RV64ZVE32F-NEXT:  .LBB110_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 18(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 24(a0)
+; RV64ZVE32F-NEXT:    lh a4, 26(a0)
+; RV64ZVE32F-NEXT:    lh a5, 22(a0)
+; RV64ZVE32F-NEXT:    lh a6, 16(a0)
+; RV64ZVE32F-NEXT:    lh a7, 18(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a1), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 20
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a5
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a7
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 14, i64 15, i64 12, i64 13, i64 10, i64 11, i64 8, i64 9>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13762,92 +13015,28 @@ define <8 x i16> @mgather_reverse_strided_2xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_reverse_strided_2xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB111_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB111_10
-; RV64ZVE32F-NEXT:  .LBB111_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB111_11
-; RV64ZVE32F-NEXT:  .LBB111_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB111_12
-; RV64ZVE32F-NEXT:  .LBB111_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB111_13
-; RV64ZVE32F-NEXT:  .LBB111_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB111_14
-; RV64ZVE32F-NEXT:  .LBB111_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB111_15
-; RV64ZVE32F-NEXT:  .LBB111_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB111_16
-; RV64ZVE32F-NEXT:  .LBB111_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB111_9: # %cond.load
-; RV64ZVE32F-NEXT:    addi a2, a0, 28
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB111_2
-; RV64ZVE32F-NEXT:  .LBB111_10: # %cond.load1
+; RV64ZVE32F-NEXT:    addi a1, a0, 28
 ; RV64ZVE32F-NEXT:    lh a2, 30(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB111_3
-; RV64ZVE32F-NEXT:  .LBB111_11: # %cond.load4
-; RV64ZVE32F-NEXT:    lh a2, 20(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB111_4
-; RV64ZVE32F-NEXT:  .LBB111_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 22(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB111_5
-; RV64ZVE32F-NEXT:  .LBB111_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 12(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB111_6
-; RV64ZVE32F-NEXT:  .LBB111_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 14(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB111_7
-; RV64ZVE32F-NEXT:  .LBB111_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB111_8
-; RV64ZVE32F-NEXT:  .LBB111_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 20(a0)
+; RV64ZVE32F-NEXT:    lh a4, 22(a0)
+; RV64ZVE32F-NEXT:    lh a5, 14(a0)
+; RV64ZVE32F-NEXT:    lh a6, 4(a0)
+; RV64ZVE32F-NEXT:    lh a7, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a1), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 12
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a5
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a7
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 14, i64 15, i64 10, i64 11, i64 6, i64 7, i64 2, i64 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13870,91 +13059,27 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_2xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB112_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB112_10
-; RV64ZVE32F-NEXT:  .LBB112_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB112_11
-; RV64ZVE32F-NEXT:  .LBB112_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB112_12
-; RV64ZVE32F-NEXT:  .LBB112_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB112_13
-; RV64ZVE32F-NEXT:  .LBB112_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB112_14
-; RV64ZVE32F-NEXT:  .LBB112_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB112_15
-; RV64ZVE32F-NEXT:  .LBB112_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB112_16
-; RV64ZVE32F-NEXT:  .LBB112_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB112_9: # %cond.load
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB112_2
-; RV64ZVE32F-NEXT:  .LBB112_10: # %cond.load1
-; RV64ZVE32F-NEXT:    lh a2, 2(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB112_3
-; RV64ZVE32F-NEXT:  .LBB112_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a1, 2(a0)
 ; RV64ZVE32F-NEXT:    lh a2, 16(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB112_4
-; RV64ZVE32F-NEXT:  .LBB112_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 18(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB112_5
-; RV64ZVE32F-NEXT:  .LBB112_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB112_6
-; RV64ZVE32F-NEXT:  .LBB112_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 10(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB112_7
-; RV64ZVE32F-NEXT:  .LBB112_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB112_8
-; RV64ZVE32F-NEXT:  .LBB112_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 18(a0)
+; RV64ZVE32F-NEXT:    lh a4, 10(a0)
+; RV64ZVE32F-NEXT:    lh a5, 4(a0)
+; RV64ZVE32F-NEXT:    lh a6, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 8
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 2, i32 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13980,91 +13105,27 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_2xSEW_unaligned:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB113_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB113_10
-; RV64ZVE32F-NEXT:  .LBB113_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB113_11
-; RV64ZVE32F-NEXT:  .LBB113_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB113_12
-; RV64ZVE32F-NEXT:  .LBB113_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB113_13
-; RV64ZVE32F-NEXT:  .LBB113_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB113_14
-; RV64ZVE32F-NEXT:  .LBB113_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB113_15
-; RV64ZVE32F-NEXT:  .LBB113_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB113_16
-; RV64ZVE32F-NEXT:  .LBB113_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB113_9: # %cond.load
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB113_2
-; RV64ZVE32F-NEXT:  .LBB113_10: # %cond.load1
-; RV64ZVE32F-NEXT:    lh a2, 2(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB113_3
-; RV64ZVE32F-NEXT:  .LBB113_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a1, 2(a0)
 ; RV64ZVE32F-NEXT:    lh a2, 18(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB113_4
-; RV64ZVE32F-NEXT:  .LBB113_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 20(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB113_5
-; RV64ZVE32F-NEXT:  .LBB113_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB113_6
-; RV64ZVE32F-NEXT:  .LBB113_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 10(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB113_7
-; RV64ZVE32F-NEXT:  .LBB113_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB113_8
-; RV64ZVE32F-NEXT:  .LBB113_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 20(a0)
+; RV64ZVE32F-NEXT:    lh a4, 10(a0)
+; RV64ZVE32F-NEXT:    lh a5, 4(a0)
+; RV64ZVE32F-NEXT:    lh a6, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 8
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 9, i32 10, i32 4, i32 5, i32 2, i32 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -14091,92 +13152,27 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned2(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_2xSEW_unaligned2:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB114_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB114_10
-; RV64ZVE32F-NEXT:  .LBB114_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB114_11
-; RV64ZVE32F-NEXT:  .LBB114_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB114_12
-; RV64ZVE32F-NEXT:  .LBB114_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB114_13
-; RV64ZVE32F-NEXT:  .LBB114_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB114_14
-; RV64ZVE32F-NEXT:  .LBB114_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB114_15
-; RV64ZVE32F-NEXT:  .LBB114_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB114_16
-; RV64ZVE32F-NEXT:  .LBB114_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB114_9: # %cond.load
-; RV64ZVE32F-NEXT:    addi a2, a0, 2
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB114_2
-; RV64ZVE32F-NEXT:  .LBB114_10: # %cond.load1
+; RV64ZVE32F-NEXT:    addi a1, a0, 2
 ; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB114_3
-; RV64ZVE32F-NEXT:  .LBB114_11: # %cond.load4
-; RV64ZVE32F-NEXT:    lh a2, 18(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB114_4
-; RV64ZVE32F-NEXT:  .LBB114_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 20(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB114_5
-; RV64ZVE32F-NEXT:  .LBB114_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB114_6
-; RV64ZVE32F-NEXT:  .LBB114_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 10(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB114_7
-; RV64ZVE32F-NEXT:  .LBB114_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB114_8
-; RV64ZVE32F-NEXT:  .LBB114_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 18(a0)
+; RV64ZVE32F-NEXT:    lh a4, 20(a0)
+; RV64ZVE32F-NEXT:    lh a5, 10(a0)
+; RV64ZVE32F-NEXT:    lh a6, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a1), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 8
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a5
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 1, i32 2, i32 9, i32 10, i32 4, i32 5, i32 2, i32 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -14206,91 +13202,27 @@ define <8 x i16> @mgather_gather_4xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_4xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB115_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB115_10
-; RV64ZVE32F-NEXT:  .LBB115_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB115_11
-; RV64ZVE32F-NEXT:  .LBB115_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB115_12
-; RV64ZVE32F-NEXT:  .LBB115_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB115_13
-; RV64ZVE32F-NEXT:  .LBB115_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB115_14
-; RV64ZVE32F-NEXT:  .LBB115_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB115_15
-; RV64ZVE32F-NEXT:  .LBB115_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB115_16
-; RV64ZVE32F-NEXT:  .LBB115_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB115_9: # %cond.load
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB115_2
-; RV64ZVE32F-NEXT:  .LBB115_10: # %cond.load1
-; RV64ZVE32F-NEXT:    lh a2, 2(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB115_3
-; RV64ZVE32F-NEXT:  .LBB115_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a1, 2(a0)
 ; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB115_4
-; RV64ZVE32F-NEXT:  .LBB115_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB115_5
-; RV64ZVE32F-NEXT:  .LBB115_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 16(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB115_6
-; RV64ZVE32F-NEXT:  .LBB115_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 18(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB115_7
-; RV64ZVE32F-NEXT:  .LBB115_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 20(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB115_8
-; RV64ZVE32F-NEXT:  .LBB115_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 22(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 6(a0)
+; RV64ZVE32F-NEXT:    lh a4, 18(a0)
+; RV64ZVE32F-NEXT:    lh a5, 20(a0)
+; RV64ZVE32F-NEXT:    lh a6, 22(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 16
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 8, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 8, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -14317,91 +13249,27 @@ define <8 x i16> @mgather_gather_4xSEW_partial_align(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_4xSEW_partial_align:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB116_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB116_10
-; RV64ZVE32F-NEXT:  .LBB116_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB116_11
-; RV64ZVE32F-NEXT:  .LBB116_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB116_12
-; RV64ZVE32F-NEXT:  .LBB116_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB116_13
-; RV64ZVE32F-NEXT:  .LBB116_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB116_14
-; RV64ZVE32F-NEXT:  .LBB116_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB116_15
-; RV64ZVE32F-NEXT:  .LBB116_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB116_16
-; RV64ZVE32F-NEXT:  .LBB116_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB116_9: # %cond.load
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB116_2
-; RV64ZVE32F-NEXT:  .LBB116_10: # %cond.load1
-; RV64ZVE32F-NEXT:    lh a2, 2(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB116_3
-; RV64ZVE32F-NEXT:  .LBB116_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a1, 2(a0)
 ; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB116_4
-; RV64ZVE32F-NEXT:  .LBB116_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB116_5
-; RV64ZVE32F-NEXT:  .LBB116_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 16(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB116_6
-; RV64ZVE32F-NEXT:  .LBB116_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 18(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB116_7
-; RV64ZVE32F-NEXT:  .LBB116_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 20(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB116_8
-; RV64ZVE32F-NEXT:  .LBB116_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 22(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 6(a0)
+; RV64ZVE32F-NEXT:    lh a4, 18(a0)
+; RV64ZVE32F-NEXT:    lh a5, 20(a0)
+; RV64ZVE32F-NEXT:    lh a6, 22(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 16
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -14413,10 +13281,8 @@ define <8 x i16> @mgather_shuffle_reverse(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -14439,92 +13305,27 @@ define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_shuffle_rotate:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB118_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB118_10
-; RV64ZVE32F-NEXT:  .LBB118_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB118_11
-; RV64ZVE32F-NEXT:  .LBB118_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB118_12
-; RV64ZVE32F-NEXT:  .LBB118_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB118_13
-; RV64ZVE32F-NEXT:  .LBB118_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB118_14
-; RV64ZVE32F-NEXT:  .LBB118_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB118_15
-; RV64ZVE32F-NEXT:  .LBB118_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB118_16
-; RV64ZVE32F-NEXT:  .LBB118_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB118_9: # %cond.load
-; RV64ZVE32F-NEXT:    addi a2, a0, 8
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB118_2
-; RV64ZVE32F-NEXT:  .LBB118_10: # %cond.load1
-; RV64ZVE32F-NEXT:    lh a2, 10(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB118_3
-; RV64ZVE32F-NEXT:  .LBB118_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a1, 10(a0)
 ; RV64ZVE32F-NEXT:    lh a2, 12(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB118_4
-; RV64ZVE32F-NEXT:  .LBB118_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 14(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB118_5
-; RV64ZVE32F-NEXT:  .LBB118_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 0(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB118_6
-; RV64ZVE32F-NEXT:  .LBB118_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 2(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB118_7
-; RV64ZVE32F-NEXT:  .LBB118_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB118_8
-; RV64ZVE32F-NEXT:  .LBB118_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 14(a0)
+; RV64ZVE32F-NEXT:    lh a4, 2(a0)
+; RV64ZVE32F-NEXT:    lh a5, 4(a0)
+; RV64ZVE32F-NEXT:    lh a6, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 8
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a2
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a3
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v9, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 4, i64 5, i64 6, i64 7, i64 0, i64 1, i64 2, i64 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -14551,91 +13352,27 @@ define <8 x i16> @mgather_shuffle_vrgather(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_shuffle_vrgather:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB119_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB119_10
-; RV64ZVE32F-NEXT:  .LBB119_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB119_11
-; RV64ZVE32F-NEXT:  .LBB119_3: # %else5
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB119_12
-; RV64ZVE32F-NEXT:  .LBB119_4: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB119_13
-; RV64ZVE32F-NEXT:  .LBB119_5: # %else11
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB119_14
-; RV64ZVE32F-NEXT:  .LBB119_6: # %else14
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB119_15
-; RV64ZVE32F-NEXT:  .LBB119_7: # %else17
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB119_16
-; RV64ZVE32F-NEXT:  .LBB119_8: # %else20
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB119_9: # %cond.load
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB119_2
-; RV64ZVE32F-NEXT:  .LBB119_10: # %cond.load1
-; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB119_3
-; RV64ZVE32F-NEXT:  .LBB119_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a1, 4(a0)
 ; RV64ZVE32F-NEXT:    lh a2, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB119_4
-; RV64ZVE32F-NEXT:  .LBB119_12: # %cond.load7
-; RV64ZVE32F-NEXT:    lh a2, 2(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB119_5
-; RV64ZVE32F-NEXT:  .LBB119_13: # %cond.load10
-; RV64ZVE32F-NEXT:    lh a2, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB119_6
-; RV64ZVE32F-NEXT:  .LBB119_14: # %cond.load13
-; RV64ZVE32F-NEXT:    lh a2, 10(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB119_7
-; RV64ZVE32F-NEXT:  .LBB119_15: # %cond.load16
-; RV64ZVE32F-NEXT:    lh a2, 12(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB119_8
-; RV64ZVE32F-NEXT:  .LBB119_16: # %cond.load19
-; RV64ZVE32F-NEXT:    lh a0, 14(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
-; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    lh a3, 2(a0)
+; RV64ZVE32F-NEXT:    lh a4, 10(a0)
+; RV64ZVE32F-NEXT:    lh a5, 12(a0)
+; RV64ZVE32F-NEXT:    lh a6, 14(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    addi a0, a0, 8
+; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
+; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
+; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
+; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 0, i64 2, i64 3, i64 1, i64 4, i64 5, i64 6, i64 7>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -15082,7 +13819,7 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
 ; RV64ZVE32F-NEXT:    addi sp, sp, 144
 ; RV64ZVE32F-NEXT:    ret
   %ptrs = getelementptr inbounds i64, ptr %base, <32 x i64> <i64 0, i64 2, i64 4, i64 6, i64 8, i64 10, i64 12, i64 14, i64 16, i64 18, i64 20, i64 22, i64 24, i64 26, i64 28, i64 30, i64 32, i64 34, i64 36, i64 38, i64 40, i64 42, i64 44, i64 46, i64 48, i64 50, i64 52, i64 54, i64 56, i64 58, i64 60, i64 62>
-  %x = call <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr> %ptrs, i32 8, <32 x i1> shufflevector(<32 x i1> insertelement(<32 x i1> poison, i1 true, i32 0), <32 x i1> poison, <32 x i32> zeroinitializer), <32 x i64> poison)
+  %x = call <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr> %ptrs, i32 8, <32 x i1> splat (i1 true), <32 x i64> poison)
   ret <32 x i64> %x
 }
 
@@ -15125,7 +13862,7 @@ define <4 x i32> @masked_gather_widen_sew_negative_stride(ptr %base) {
 ; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64ZVE32F-NEXT:    ret
   %ptrs = getelementptr i32, ptr %base, <4 x i64> <i64 34, i64 35, i64 0, i64 1>
-  %x = call <4 x i32> @llvm.masked.gather.v4i32.v32p0(<4 x ptr> %ptrs, i32 8, <4 x i1> shufflevector(<4 x i1> insertelement(<4 x i1> poison, i1 true, i32 0), <4 x i1> poison, <4 x i32> zeroinitializer), <4 x i32> poison)
+  %x = call <4 x i32> @llvm.masked.gather.v4i32.v32p0(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 true), <4 x i32> poison)
   ret <4 x i32> %x
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index b2ff4714556319..aa815e18ac1014 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -336,49 +336,19 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4i8:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a4, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB6_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB6_6
-; RV64ZVE32F-NEXT:  .LBB6_2: # %else2
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    bnez a0, .LBB6_7
-; RV64ZVE32F-NEXT:  .LBB6_3: # %else4
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    bnez a3, .LBB6_8
-; RV64ZVE32F-NEXT:  .LBB6_4: # %else6
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB6_5: # %cond.store
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vse8.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB6_2
-; RV64ZVE32F-NEXT:  .LBB6_6: # %cond.store1
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    ld a3, 16(a0)
+; RV64ZVE32F-NEXT:    ld a0, 8(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vse8.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse8.v v9, (a4)
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    beqz a0, .LBB6_3
-; RV64ZVE32F-NEXT:  .LBB6_7: # %cond.store3
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vse8.v v9, (a0)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse8.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    beqz a3, .LBB6_4
-; RV64ZVE32F-NEXT:  .LBB6_8: # %cond.store5
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vse8.v v9, (a3)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse8.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %val, <4 x ptr> %ptrs, i32 1, <4 x i1> %mtrue)
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %val, <4 x ptr> %ptrs, i32 1, <4 x i1> splat (i1 1))
   ret void
 }
 
@@ -883,49 +853,19 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4i16:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a4, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB15_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB15_6
-; RV64ZVE32F-NEXT:  .LBB15_2: # %else2
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    bnez a0, .LBB15_7
-; RV64ZVE32F-NEXT:  .LBB15_3: # %else4
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    bnez a3, .LBB15_8
-; RV64ZVE32F-NEXT:  .LBB15_4: # %else6
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB15_5: # %cond.store
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB15_2
-; RV64ZVE32F-NEXT:  .LBB15_6: # %cond.store1
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    ld a3, 16(a0)
+; RV64ZVE32F-NEXT:    ld a0, 8(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse16.v v9, (a4)
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    beqz a0, .LBB15_3
-; RV64ZVE32F-NEXT:  .LBB15_7: # %cond.store3
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v9, (a0)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    beqz a3, .LBB15_4
-; RV64ZVE32F-NEXT:  .LBB15_8: # %cond.store5
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v9, (a3)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> %mtrue)
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1))
   ret void
 }
 
@@ -1788,49 +1728,19 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4i32:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a4, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB26_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB26_6
-; RV64ZVE32F-NEXT:  .LBB26_2: # %else2
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    bnez a0, .LBB26_7
-; RV64ZVE32F-NEXT:  .LBB26_3: # %else4
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    bnez a3, .LBB26_8
-; RV64ZVE32F-NEXT:  .LBB26_4: # %else6
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB26_5: # %cond.store
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vse32.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB26_2
-; RV64ZVE32F-NEXT:  .LBB26_6: # %cond.store1
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    ld a3, 16(a0)
+; RV64ZVE32F-NEXT:    ld a0, 8(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse32.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse32.v v9, (a4)
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    beqz a0, .LBB26_3
-; RV64ZVE32F-NEXT:  .LBB26_7: # %cond.store3
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse32.v v9, (a0)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse32.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    beqz a3, .LBB26_4
-; RV64ZVE32F-NEXT:  .LBB26_8: # %cond.store5
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse32.v v9, (a3)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> %mtrue)
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1))
   ret void
 }
 
@@ -3163,50 +3073,22 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs) {
 ; RV32ZVE32F-NEXT:    lw a2, 24(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 20(a0)
 ; RV32ZVE32F-NEXT:    lw a4, 16(a0)
-; RV32ZVE32F-NEXT:    lw a7, 12(a0)
+; RV32ZVE32F-NEXT:    lw a5, 12(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 8(a0)
-; RV32ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT:    vmset.m v9
-; RV32ZVE32F-NEXT:    vmv.x.s a5, v9
-; RV32ZVE32F-NEXT:    beqz zero, .LBB39_5
-; RV32ZVE32F-NEXT:  # %bb.1: # %else
-; RV32ZVE32F-NEXT:    andi a0, a5, 2
-; RV32ZVE32F-NEXT:    bnez a0, .LBB39_6
-; RV32ZVE32F-NEXT:  .LBB39_2: # %else2
-; RV32ZVE32F-NEXT:    andi a0, a5, 4
-; RV32ZVE32F-NEXT:    bnez a0, .LBB39_7
-; RV32ZVE32F-NEXT:  .LBB39_3: # %else4
-; RV32ZVE32F-NEXT:    andi a5, a5, 8
-; RV32ZVE32F-NEXT:    bnez a5, .LBB39_8
-; RV32ZVE32F-NEXT:  .LBB39_4: # %else6
-; RV32ZVE32F-NEXT:    ret
-; RV32ZVE32F-NEXT:  .LBB39_5: # %cond.store
-; RV32ZVE32F-NEXT:    lw t0, 4(a0)
-; RV32ZVE32F-NEXT:    lw a0, 0(a0)
-; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT:    vmv.x.s t1, v8
-; RV32ZVE32F-NEXT:    sw t0, 4(t1)
-; RV32ZVE32F-NEXT:    sw a0, 0(t1)
-; RV32ZVE32F-NEXT:    andi a0, a5, 2
-; RV32ZVE32F-NEXT:    beqz a0, .LBB39_2
-; RV32ZVE32F-NEXT:  .LBB39_6: # %cond.store1
+; RV32ZVE32F-NEXT:    lw a7, 0(a0)
+; RV32ZVE32F-NEXT:    lw a0, 4(a0)
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s t0, v8
+; RV32ZVE32F-NEXT:    sw a7, 0(t0)
+; RV32ZVE32F-NEXT:    sw a0, 4(t0)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
-; RV32ZVE32F-NEXT:    sw a7, 4(a0)
 ; RV32ZVE32F-NEXT:    sw a6, 0(a0)
-; RV32ZVE32F-NEXT:    andi a0, a5, 4
-; RV32ZVE32F-NEXT:    beqz a0, .LBB39_3
-; RV32ZVE32F-NEXT:  .LBB39_7: # %cond.store3
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    sw a5, 4(a0)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
 ; RV32ZVE32F-NEXT:    sw a4, 0(a0)
 ; RV32ZVE32F-NEXT:    sw a3, 4(a0)
-; RV32ZVE32F-NEXT:    andi a5, a5, 8
-; RV32ZVE32F-NEXT:    beqz a5, .LBB39_4
-; RV32ZVE32F-NEXT:  .LBB39_8: # %cond.store5
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
 ; RV32ZVE32F-NEXT:    sw a2, 0(a0)
@@ -3216,46 +3098,19 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4i64:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a2, 24(a1)
-; RV64ZVE32F-NEXT:    ld a4, 16(a1)
-; RV64ZVE32F-NEXT:    ld a7, 8(a1)
-; RV64ZVE32F-NEXT:    ld a3, 24(a0)
-; RV64ZVE32F-NEXT:    ld a5, 16(a0)
-; RV64ZVE32F-NEXT:    ld t0, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a6, v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB39_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a6, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB39_6
-; RV64ZVE32F-NEXT:  .LBB39_2: # %else2
-; RV64ZVE32F-NEXT:    andi a0, a6, 4
-; RV64ZVE32F-NEXT:    bnez a0, .LBB39_7
-; RV64ZVE32F-NEXT:  .LBB39_3: # %else4
-; RV64ZVE32F-NEXT:    andi a0, a6, 8
-; RV64ZVE32F-NEXT:    bnez a0, .LBB39_8
-; RV64ZVE32F-NEXT:  .LBB39_4: # %else6
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB39_5: # %cond.store
+; RV64ZVE32F-NEXT:    ld a3, 16(a1)
+; RV64ZVE32F-NEXT:    ld a4, 8(a1)
 ; RV64ZVE32F-NEXT:    ld a1, 0(a1)
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    sd a0, 0(a1)
-; RV64ZVE32F-NEXT:    andi a0, a6, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB39_2
-; RV64ZVE32F-NEXT:  .LBB39_6: # %cond.store1
-; RV64ZVE32F-NEXT:    sd t0, 0(a7)
-; RV64ZVE32F-NEXT:    andi a0, a6, 4
-; RV64ZVE32F-NEXT:    beqz a0, .LBB39_3
-; RV64ZVE32F-NEXT:  .LBB39_7: # %cond.store3
-; RV64ZVE32F-NEXT:    sd a5, 0(a4)
-; RV64ZVE32F-NEXT:    andi a0, a6, 8
-; RV64ZVE32F-NEXT:    beqz a0, .LBB39_4
-; RV64ZVE32F-NEXT:  .LBB39_8: # %cond.store5
-; RV64ZVE32F-NEXT:    sd a3, 0(a2)
+; RV64ZVE32F-NEXT:    ld a5, 0(a0)
+; RV64ZVE32F-NEXT:    ld a6, 8(a0)
+; RV64ZVE32F-NEXT:    ld a7, 16(a0)
+; RV64ZVE32F-NEXT:    ld a0, 24(a0)
+; RV64ZVE32F-NEXT:    sd a5, 0(a1)
+; RV64ZVE32F-NEXT:    sd a6, 0(a4)
+; RV64ZVE32F-NEXT:    sd a7, 0(a3)
+; RV64ZVE32F-NEXT:    sd a0, 0(a2)
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> %val, <4 x ptr> %ptrs, i32 8, <4 x i1> %mtrue)
+  call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> %val, <4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 1))
   ret void
 }
 
@@ -6168,49 +6023,19 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4f16:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a4, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB55_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB55_6
-; RV64ZVE32F-NEXT:  .LBB55_2: # %else2
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    bnez a0, .LBB55_7
-; RV64ZVE32F-NEXT:  .LBB55_3: # %else4
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    bnez a3, .LBB55_8
-; RV64ZVE32F-NEXT:  .LBB55_4: # %else6
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB55_5: # %cond.store
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB55_2
-; RV64ZVE32F-NEXT:  .LBB55_6: # %cond.store1
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    ld a3, 16(a0)
+; RV64ZVE32F-NEXT:    ld a0, 8(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse16.v v9, (a4)
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    beqz a0, .LBB55_3
-; RV64ZVE32F-NEXT:  .LBB55_7: # %cond.store3
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v9, (a0)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    beqz a3, .LBB55_4
-; RV64ZVE32F-NEXT:  .LBB55_8: # %cond.store5
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v9, (a3)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> %mtrue)
+  call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1))
   ret void
 }
 
@@ -7020,49 +6845,19 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4f32:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a4, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB65_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB65_6
-; RV64ZVE32F-NEXT:  .LBB65_2: # %else2
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    bnez a0, .LBB65_7
-; RV64ZVE32F-NEXT:  .LBB65_3: # %else4
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    bnez a3, .LBB65_8
-; RV64ZVE32F-NEXT:  .LBB65_4: # %else6
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB65_5: # %cond.store
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vse32.v v8, (a0)
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB65_2
-; RV64ZVE32F-NEXT:  .LBB65_6: # %cond.store1
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    ld a3, 16(a0)
+; RV64ZVE32F-NEXT:    ld a0, 8(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse32.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse32.v v9, (a4)
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    beqz a0, .LBB65_3
-; RV64ZVE32F-NEXT:  .LBB65_7: # %cond.store3
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse32.v v9, (a0)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse32.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    beqz a3, .LBB65_4
-; RV64ZVE32F-NEXT:  .LBB65_8: # %cond.store5
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse32.v v9, (a3)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> %mtrue)
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1))
   ret void
 }
 
@@ -8368,43 +8163,15 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs) {
 ;
 ; RV32ZVE32F-LABEL: mscatter_truemask_v4f64:
 ; RV32ZVE32F:       # %bb.0:
-; RV32ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT:    vmset.m v9
-; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
-; RV32ZVE32F-NEXT:    beqz zero, .LBB78_5
-; RV32ZVE32F-NEXT:  # %bb.1: # %else
-; RV32ZVE32F-NEXT:    andi a1, a0, 2
-; RV32ZVE32F-NEXT:    bnez a1, .LBB78_6
-; RV32ZVE32F-NEXT:  .LBB78_2: # %else2
-; RV32ZVE32F-NEXT:    andi a1, a0, 4
-; RV32ZVE32F-NEXT:    bnez a1, .LBB78_7
-; RV32ZVE32F-NEXT:  .LBB78_3: # %else4
-; RV32ZVE32F-NEXT:    andi a0, a0, 8
-; RV32ZVE32F-NEXT:    bnez a0, .LBB78_8
-; RV32ZVE32F-NEXT:  .LBB78_4: # %else6
-; RV32ZVE32F-NEXT:    ret
-; RV32ZVE32F-NEXT:  .LBB78_5: # %cond.store
-; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV32ZVE32F-NEXT:    fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT:    andi a1, a0, 2
-; RV32ZVE32F-NEXT:    beqz a1, .LBB78_2
-; RV32ZVE32F-NEXT:  .LBB78_6: # %cond.store1
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
+; RV32ZVE32F-NEXT:    fsd fa0, 0(a0)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV32ZVE32F-NEXT:    fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT:    andi a1, a0, 4
-; RV32ZVE32F-NEXT:    beqz a1, .LBB78_3
-; RV32ZVE32F-NEXT:  .LBB78_7: # %cond.store3
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
+; RV32ZVE32F-NEXT:    fsd fa1, 0(a0)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV32ZVE32F-NEXT:    fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT:    andi a0, a0, 8
-; RV32ZVE32F-NEXT:    beqz a0, .LBB78_4
-; RV32ZVE32F-NEXT:  .LBB78_8: # %cond.store5
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
+; RV32ZVE32F-NEXT:    fsd fa2, 0(a0)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
 ; RV32ZVE32F-NEXT:    fsd fa3, 0(a0)
@@ -8412,43 +8179,16 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs) {
 ;
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4f64:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a4, 8(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v8
-; RV64ZVE32F-NEXT:    vmv.x.s a3, v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB78_5
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    bnez a0, .LBB78_6
-; RV64ZVE32F-NEXT:  .LBB78_2: # %else2
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    bnez a0, .LBB78_7
-; RV64ZVE32F-NEXT:  .LBB78_3: # %else4
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    bnez a3, .LBB78_8
-; RV64ZVE32F-NEXT:  .LBB78_4: # %else6
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB78_5: # %cond.store
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    fsd fa0, 0(a0)
-; RV64ZVE32F-NEXT:    andi a0, a3, 2
-; RV64ZVE32F-NEXT:    beqz a0, .LBB78_2
-; RV64ZVE32F-NEXT:  .LBB78_6: # %cond.store1
-; RV64ZVE32F-NEXT:    fsd fa1, 0(a4)
-; RV64ZVE32F-NEXT:    andi a0, a3, 4
-; RV64ZVE32F-NEXT:    beqz a0, .LBB78_3
-; RV64ZVE32F-NEXT:  .LBB78_7: # %cond.store3
-; RV64ZVE32F-NEXT:    fsd fa2, 0(a2)
-; RV64ZVE32F-NEXT:    andi a3, a3, 8
-; RV64ZVE32F-NEXT:    beqz a3, .LBB78_4
-; RV64ZVE32F-NEXT:  .LBB78_8: # %cond.store5
-; RV64ZVE32F-NEXT:    fsd fa3, 0(a1)
+; RV64ZVE32F-NEXT:    ld a1, 0(a0)
+; RV64ZVE32F-NEXT:    ld a2, 8(a0)
+; RV64ZVE32F-NEXT:    ld a3, 16(a0)
+; RV64ZVE32F-NEXT:    ld a0, 24(a0)
+; RV64ZVE32F-NEXT:    fsd fa0, 0(a1)
+; RV64ZVE32F-NEXT:    fsd fa1, 0(a2)
+; RV64ZVE32F-NEXT:    fsd fa2, 0(a3)
+; RV64ZVE32F-NEXT:    fsd fa3, 0(a0)
 ; RV64ZVE32F-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %val, <4 x ptr> %ptrs, i32 8, <4 x i1> %mtrue)
+  call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %val, <4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 1))
   ret void
 }
 
@@ -11344,10 +11084,8 @@ define void @mscatter_unit_stride(<8 x i16> %val, ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
-  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true))
   ret void
 }
 
@@ -11358,10 +11096,8 @@ define void @mscatter_unit_stride_with_offset(<8 x i16> %val, ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12>
-  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true))
   ret void
 }
 
@@ -11373,10 +11109,8 @@ define void @mscatter_shuffle_reverse(<8 x i16> %val, ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>
-  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true))
   ret void
 }
 
@@ -11399,89 +11133,31 @@ define void @mscatter_shuffle_rotate(<8 x i16> %val, ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mscatter_shuffle_rotate:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vmset.m v9
-; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV64ZVE32F-NEXT:    beqz zero, .LBB96_9
-; RV64ZVE32F-NEXT:  # %bb.1: # %else
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB96_10
-; RV64ZVE32F-NEXT:  .LBB96_2: # %else2
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB96_11
-; RV64ZVE32F-NEXT:  .LBB96_3: # %else4
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB96_12
-; RV64ZVE32F-NEXT:  .LBB96_4: # %else6
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB96_13
-; RV64ZVE32F-NEXT:  .LBB96_5: # %else8
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB96_14
-; RV64ZVE32F-NEXT:  .LBB96_6: # %else10
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB96_15
-; RV64ZVE32F-NEXT:  .LBB96_7: # %else12
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB96_16
-; RV64ZVE32F-NEXT:  .LBB96_8: # %else14
-; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB96_9: # %cond.store
-; RV64ZVE32F-NEXT:    addi a2, a0, 8
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
-; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB96_2
-; RV64ZVE32F-NEXT:  .LBB96_10: # %cond.store1
-; RV64ZVE32F-NEXT:    addi a2, a0, 10
+; RV64ZVE32F-NEXT:    addi a1, a0, 6
+; RV64ZVE32F-NEXT:    addi a2, a0, 4
+; RV64ZVE32F-NEXT:    addi a3, a0, 2
+; RV64ZVE32F-NEXT:    addi a4, a0, 14
+; RV64ZVE32F-NEXT:    addi a5, a0, 12
+; RV64ZVE32F-NEXT:    addi a6, a0, 10
+; RV64ZVE32F-NEXT:    addi a7, a0, 8
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v8, (a7)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB96_3
-; RV64ZVE32F-NEXT:  .LBB96_11: # %cond.store3
-; RV64ZVE32F-NEXT:    addi a2, a0, 12
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v9, (a6)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB96_4
-; RV64ZVE32F-NEXT:  .LBB96_12: # %cond.store5
-; RV64ZVE32F-NEXT:    addi a2, a0, 14
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v9, (a5)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 3
-; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB96_5
-; RV64ZVE32F-NEXT:  .LBB96_13: # %cond.store7
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v9, (a4)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
 ; RV64ZVE32F-NEXT:    vse16.v v9, (a0)
-; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB96_6
-; RV64ZVE32F-NEXT:  .LBB96_14: # %cond.store9
-; RV64ZVE32F-NEXT:    addi a2, a0, 2
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 5
-; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB96_7
-; RV64ZVE32F-NEXT:  .LBB96_15: # %cond.store11
-; RV64ZVE32F-NEXT:    addi a2, a0, 4
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v9, (a3)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 6
 ; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
-; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB96_8
-; RV64ZVE32F-NEXT:  .LBB96_16: # %cond.store13
-; RV64ZVE32F-NEXT:    addi a0, a0, 6
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
+; RV64ZVE32F-NEXT:    vse16.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i16 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 4, i64 5, i64 6, i64 7, i64 0, i64 1, i64 2, i64 3>
-  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true))
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 648fb785cf1512..19f3d3ce19fa4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -42,9 +42,7 @@ define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -86,9 +84,7 @@ define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -130,9 +126,7 @@ define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -176,9 +170,7 @@ define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -220,9 +212,7 @@ define <2 x float> @vp_nearbyint_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -264,9 +254,7 @@ define <4 x float> @vp_nearbyint_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.nearbyint.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.nearbyint.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -310,9 +298,7 @@ define <8 x float> @vp_nearbyint_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -356,9 +342,7 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -400,9 +384,7 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -446,9 +428,7 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -492,9 +472,7 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -538,9 +516,7 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -584,9 +560,7 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -690,8 +664,6 @@ define <32 x double> @vp_nearbyint_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
index 09bc27bdd15e8c..016be04ffc9b96 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll
@@ -14,9 +14,7 @@ define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -31,10 +29,8 @@ define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
-  %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> splat (i1 true), i32 %vl)
+  %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -46,10 +42,8 @@ define <8 x i32> @vpmerge_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
-  %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> splat (i1 true), i32 %vl)
+  %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> splat (i1 true), <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
 
@@ -61,9 +55,7 @@ define <8 x float> @vpmerge_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x f
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %x, <8 x float> %y, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
   ret <8 x float> %b
 }
@@ -76,9 +68,7 @@ define <8 x i16> @vpmerge_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i16> @llvm.vp.merge.v8i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl)
   ret <8 x i16> %b
 }
@@ -91,9 +81,7 @@ define <8 x float> @vpmerge_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
   ret <8 x float> %b
 }
@@ -106,9 +94,7 @@ define <8 x i32> @vpmerge_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -121,9 +107,7 @@ define <8 x i32> @vpmerge_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -136,9 +120,7 @@ define <8 x double> @vpmerge_vpfpext(<8 x double> %passthru, <8 x float> %x, <8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl)
   ret <8 x double> %b
 }
@@ -151,9 +133,7 @@ define <8 x float> @vpmerge_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
   ret <8 x float> %b
 }
@@ -167,9 +147,7 @@ define <8 x i32> @vpmerge_vpload(<8 x i32> %passthru, ptr %p, <8 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %p, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %p, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -183,10 +161,8 @@ define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, ptr %p, <8 x i32> %x, <8
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %p, <8 x i1> %mask, i32 %vl)
-  %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %p, <8 x i1> splat (i1 true), i32 %vl)
+  %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -203,9 +179,7 @@ define <8 x i32> @vpselect_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -218,10 +192,8 @@ define <8 x i32> @vpselect_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %
 ; CHECK-NEXT:    vmseq.vv v0, v9, v10
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
-  %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> splat (i1 true), i32 %vl)
+  %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -233,10 +205,8 @@ define <8 x i32> @vpselect_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
-  %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> splat (i1 true), i32 %vl)
+  %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> splat (i1 true), <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
 
@@ -247,9 +217,7 @@ define <8 x float> @vpselect_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %x, <8 x float> %y, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
   ret <8 x float> %b
 }
@@ -261,9 +229,7 @@ define <8 x i16> @vpselect_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl)
   ret <8 x i16> %b
 }
@@ -275,9 +241,7 @@ define <8 x float> @vpselect_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
   ret <8 x float> %b
 }
@@ -289,9 +253,7 @@ define <8 x i32> @vpselect_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -303,9 +265,7 @@ define <8 x i32> @vpselect_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -317,9 +277,7 @@ define <8 x double> @vpselect_vpfpext(<8 x double> %passthru, <8 x float> %x, <8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl)
   ret <8 x double> %b
 }
@@ -331,9 +289,7 @@ define <8 x float> @vpselect_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double> %x, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double> %x, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
   ret <8 x float> %b
 }
@@ -345,9 +301,7 @@ define <8 x i32> @vpselect_vpload(<8 x i32> %passthru, ptr %p, <8 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %p, <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %p, <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
@@ -360,10 +314,8 @@ define <8 x i32> @vpselect_vpload2(<8 x i32> %passthru, ptr %p, <8 x i32> %x, <8
 ; CHECK-NEXT:    vmseq.vv v0, v9, v10
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 true, i32 0
-  %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %a = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %p, <8 x i1> %mask, i32 %vl)
-  %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+  %a = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %p, <8 x i1> splat (i1 true), i32 %vl)
+  %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> splat (i1 true), i32 %vl)
   %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
   ret <8 x i32> %b
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 3e0fb3009c6b19..920d0d5fe7ba74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -38,9 +38,7 @@ define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.rint.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.rint.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -78,9 +76,7 @@ define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.rint.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.rint.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -118,9 +114,7 @@ define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.rint.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.rint.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -160,9 +154,7 @@ define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.rint.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.rint.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -200,9 +192,7 @@ define <2 x float> @vp_rint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.rint.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.rint.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -240,9 +230,7 @@ define <4 x float> @vp_rint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.rint.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.rint.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -282,9 +270,7 @@ define <8 x float> @vp_rint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.rint.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.rint.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -324,9 +310,7 @@ define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.rint.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.rint.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -364,9 +348,7 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -406,9 +388,7 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.rint.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.rint.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -448,9 +428,7 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.rint.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.rint.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -490,9 +468,7 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.rint.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.rint.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -532,9 +508,7 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.rint.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.rint.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -639,8 +613,6 @@ define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 96a72d0ddd18b8..6f045349423c7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -86,9 +86,7 @@ define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.round.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.round.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.round.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.round.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.round.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.round.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.round.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.round.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -388,9 +380,7 @@ define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.round.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.round.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -432,9 +422,7 @@ define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.round.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.round.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -478,9 +466,7 @@ define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.round.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.round.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -524,9 +510,7 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.round.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.round.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -568,9 +552,7 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -614,9 +596,7 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -660,9 +640,7 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -706,9 +684,7 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -752,9 +728,7 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -867,8 +841,6 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 74a43f09542af4..738d7e37c50bde 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -86,9 +86,7 @@ define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -388,9 +380,7 @@ define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.roundeven.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.roundeven.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -432,9 +422,7 @@ define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.roundeven.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.roundeven.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -478,9 +466,7 @@ define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.roundeven.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.roundeven.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -524,9 +510,7 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.roundeven.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.roundeven.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -568,9 +552,7 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -614,9 +596,7 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -660,9 +640,7 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -706,9 +684,7 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -752,9 +728,7 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -867,8 +841,6 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 91de65c79bb705..6f5b7875266b12 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -86,9 +86,7 @@ define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %ev
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %ev
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %ev
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -388,9 +380,7 @@ define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -432,9 +422,7 @@ define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -478,9 +466,7 @@ define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -524,9 +510,7 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -568,9 +552,7 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -614,9 +596,7 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -660,9 +640,7 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -706,9 +684,7 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -752,9 +728,7 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -867,8 +841,6 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index 8cf069e66e8f22..981715bd2b998b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -135,9 +135,7 @@ define <8 x i1> @icmp_eq_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"eq", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"eq", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -147,9 +145,7 @@ define <8 x i1> @icmp_eq_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"eq", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"eq", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -193,9 +189,7 @@ define <8 x i1> @icmp_ne_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ne", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"ne", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -205,9 +199,7 @@ define <8 x i1> @icmp_ne_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ne", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"ne", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -251,9 +243,7 @@ define <8 x i1> @icmp_ugt_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"ugt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -263,9 +253,7 @@ define <8 x i1> @icmp_ugt_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ugt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"ugt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -311,9 +299,7 @@ define <8 x i1> @icmp_uge_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"uge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -323,9 +309,7 @@ define <8 x i1> @icmp_uge_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"uge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"uge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -369,9 +353,7 @@ define <8 x i1> @icmp_ult_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"ult", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -381,9 +363,7 @@ define <8 x i1> @icmp_ult_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ult", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"ult", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -427,9 +407,7 @@ define <8 x i1> @icmp_sgt_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"sgt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -439,9 +417,7 @@ define <8 x i1> @icmp_sgt_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sgt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"sgt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -487,9 +463,7 @@ define <8 x i1> @icmp_sge_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"sge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -499,9 +473,7 @@ define <8 x i1> @icmp_sge_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"sge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -545,9 +517,7 @@ define <8 x i1> @icmp_slt_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"slt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"slt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -557,9 +527,7 @@ define <8 x i1> @icmp_slt_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"slt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"slt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -605,9 +573,7 @@ define <8 x i1> @icmp_sle_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sle", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), metadata !"sle", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -617,9 +583,7 @@ define <8 x i1> @icmp_sle_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sle", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> splat (i8 4), <8 x i8> %va, metadata !"sle", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -783,9 +747,7 @@ define <8 x i1> @icmp_eq_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmseq.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"eq", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"eq", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -796,9 +758,7 @@ define <8 x i1> @icmp_eq_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vmseq.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"eq", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"eq", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -846,9 +806,7 @@ define <8 x i1> @icmp_ne_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsne.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ne", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"ne", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -859,9 +817,7 @@ define <8 x i1> @icmp_ne_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vmsne.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ne", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"ne", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -909,9 +865,7 @@ define <8 x i1> @icmp_ugt_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsgtu.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"ugt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -922,9 +876,7 @@ define <8 x i1> @icmp_ugt_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsleu.vi v10, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ugt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"ugt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -974,9 +926,7 @@ define <8 x i1> @icmp_uge_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsgtu.vi v10, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"uge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -987,9 +937,7 @@ define <8 x i1> @icmp_uge_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsleu.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"uge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"uge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1037,9 +985,7 @@ define <8 x i1> @icmp_ult_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsleu.vi v10, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"ult", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1050,9 +996,7 @@ define <8 x i1> @icmp_ult_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsgtu.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ult", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"ult", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1100,9 +1044,7 @@ define <8 x i1> @icmp_sgt_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsgt.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"sgt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1113,9 +1055,7 @@ define <8 x i1> @icmp_sgt_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsle.vi v10, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sgt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"sgt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1165,9 +1105,7 @@ define <8 x i1> @icmp_sge_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsgt.vi v10, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"sge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1178,9 +1116,7 @@ define <8 x i1> @icmp_sge_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsle.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"sge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1228,9 +1164,7 @@ define <8 x i1> @icmp_slt_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsle.vi v10, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"slt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"slt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1241,9 +1175,7 @@ define <8 x i1> @icmp_slt_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsgt.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"slt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"slt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1293,9 +1225,7 @@ define <8 x i1> @icmp_sle_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsle.vi v10, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sle", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), metadata !"sle", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1306,9 +1236,7 @@ define <8 x i1> @icmp_sle_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsgt.vi v10, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sle", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> splat (i32 4), <8 x i32> %va, metadata !"sle", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1558,9 +1486,7 @@ define <8 x i1> @icmp_eq_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmseq.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"eq", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"eq", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1571,9 +1497,7 @@ define <8 x i1> @icmp_eq_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vmseq.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"eq", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"eq", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1651,9 +1575,7 @@ define <8 x i1> @icmp_ne_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsne.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ne", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"ne", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1664,9 +1586,7 @@ define <8 x i1> @icmp_ne_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vmsne.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ne", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"ne", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1744,9 +1664,7 @@ define <8 x i1> @icmp_ugt_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsgtu.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"ugt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1757,9 +1675,7 @@ define <8 x i1> @icmp_ugt_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsleu.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ugt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"ugt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1839,9 +1755,7 @@ define <8 x i1> @icmp_uge_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsgtu.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"uge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1852,9 +1766,7 @@ define <8 x i1> @icmp_uge_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsleu.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"uge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"uge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1932,9 +1844,7 @@ define <8 x i1> @icmp_ult_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsleu.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"ult", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -1945,9 +1855,7 @@ define <8 x i1> @icmp_ult_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsgtu.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ult", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"ult", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -2025,9 +1933,7 @@ define <8 x i1> @icmp_sgt_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsgt.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"sgt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -2038,9 +1944,7 @@ define <8 x i1> @icmp_sgt_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsle.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sgt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"sgt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -2120,9 +2024,7 @@ define <8 x i1> @icmp_sge_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsgt.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"sge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -2133,9 +2035,7 @@ define <8 x i1> @icmp_sge_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsle.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sge", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"sge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -2213,9 +2113,7 @@ define <8 x i1> @icmp_slt_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsle.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"slt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"slt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -2226,9 +2124,7 @@ define <8 x i1> @icmp_slt_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsgt.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"slt", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"slt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -2308,9 +2204,7 @@ define <8 x i1> @icmp_sle_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vmsle.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sle", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), metadata !"sle", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
 
@@ -2321,8 +2215,6 @@ define <8 x i1> @icmp_sle_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vmsgt.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sle", <8 x i1> %m, i32 %evl)
+  %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> splat (i64 4), <8 x i64> %va, metadata !"sle", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll
index c2c321bf91fbc4..bd9b66997ff8d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll
@@ -22,7 +22,7 @@ define <4 x i16> @vsext_v4i16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.sext.v4i16.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sext.v4i16.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -46,7 +46,7 @@ define <4 x i32> @vsext_v4i32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.sext.v4i32.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sext.v4i32.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -70,6 +70,6 @@ define <4 x i64> @vsext_v4i64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
index b1d3f5c3a3a6bd..52596d8892411a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
@@ -22,7 +22,7 @@ define <4 x i16> @vsext_v4i16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.sext.v4i16.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sext.v4i16.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -46,7 +46,7 @@ define <4 x i32> @vsext_v4i32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsext.vf4 v9, v8
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.sext.v4i32.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sext.v4i32.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -70,7 +70,7 @@ define <4 x i64> @vsext_v4i64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsext.vf8 v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -94,7 +94,7 @@ define <4 x i32> @vsext_v4i32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.sext.v4i32.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sext.v4i32.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -118,7 +118,7 @@ define <4 x i64> @vsext_v4i64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsext.vf4 v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -142,7 +142,7 @@ define <4 x i64> @vsext_v4i64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sext.v4i64.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -197,7 +197,7 @@ define <32 x i64> @vsext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  %v = call <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll
index 60469eb466b0b7..67c045cc2b1899 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll
@@ -25,7 +25,7 @@ define <4 x half> @vsitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -52,7 +52,7 @@ define <4 x float> @vsitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -79,6 +79,6 @@ define <4 x double> @vsitofp_v4f64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
index a6e55fc3532525..5e93fdfc7a652d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
@@ -73,7 +73,7 @@ define <4 x half> @vsitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -111,7 +111,7 @@ define <4 x half> @vsitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -151,7 +151,7 @@ define <4 x half> @vsitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -193,7 +193,7 @@ define <4 x half> @vsitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -217,7 +217,7 @@ define <4 x float> @vsitofp_v4f32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsext.vf2 v9, v8
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -241,7 +241,7 @@ define <4 x float> @vsitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -263,7 +263,7 @@ define <4 x float> @vsitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -287,7 +287,7 @@ define <4 x float> @vsitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vfncvt.f.x.w v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -311,7 +311,7 @@ define <4 x double> @vsitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsext.vf4 v10, v8
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -335,7 +335,7 @@ define <4 x double> @vsitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -359,7 +359,7 @@ define <4 x double> @vsitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -381,7 +381,7 @@ define <4 x double> @vsitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -430,6 +430,6 @@ define <32 x double> @vsitofp_v32f64_v32i64_unmasked(<32 x i64> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16
 ; CHECK-NEXT:    ret
-  %v = call <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  %v = call <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 2ae058128eaa00..6a8d2008de74dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -78,9 +78,7 @@ define <4 x i8> @strided_vpload_v4i8_allones_mask(ptr %ptr, i32 signext %stride,
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
+  %load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i32(ptr %ptr, i32 %stride, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %load
 }
 
@@ -158,9 +156,7 @@ define <8 x i16> @strided_vpload_v8i16_allones_mask(ptr %ptr, i32 signext %strid
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
+  %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr %ptr, i32 %stride, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %load
 }
 
@@ -216,9 +212,7 @@ define <8 x i32> @strided_vpload_v8i32_allones_mask(ptr %ptr, i32 signext %strid
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
+  %load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr %ptr, i32 %stride, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %load
 }
 
@@ -262,9 +256,7 @@ define <4 x i64> @strided_vpload_v4i64_allones_mask(ptr %ptr, i32 signext %strid
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
+  %load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr %ptr, i32 %stride, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %load
 }
 
@@ -298,9 +290,7 @@ define <2 x half> @strided_vpload_v2f16_allones_mask(ptr %ptr, i32 signext %stri
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x i1> poison, i1 true, i32 0
-  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
-  %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0.i32(ptr %ptr, i32 %stride, <2 x i1> %b, i32 %evl)
+  %load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0.i32(ptr %ptr, i32 %stride, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %load
 }
 
@@ -390,9 +380,7 @@ define <8 x float> @strided_vpload_v8f32_allones_mask(ptr %ptr, i32 signext %str
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr %ptr, i32 %stride, <8 x i1> %b, i32 %evl)
+  %load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr %ptr, i32 %stride, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %load
 }
 
@@ -437,9 +425,7 @@ define <4 x double> @strided_vpload_v4f64_allones_mask(ptr %ptr, i32 signext %st
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr %ptr, i32 %stride, <4 x i1> %b, i32 %evl)
+  %load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr %ptr, i32 %stride, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %load
 }
 
@@ -472,9 +458,7 @@ define <3 x double> @strided_vpload_v3f64_allones_mask(ptr %ptr, i32 signext %st
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %one = insertelement <3 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <3 x i1> %one, <3 x i1> poison, <3 x i32> zeroinitializer
-  %v = call <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr %ptr, i32 %stride, <3 x i1> %allones, i32 %evl)
+  %v = call <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0.i32(ptr %ptr, i32 %stride, <3 x i1> splat (i1 true), i32 %evl)
   ret <3 x double> %v
 }
 
@@ -530,9 +514,7 @@ define <32 x double> @strided_vpload_v32f64_allones_mask(ptr %ptr, i32 signext %
 ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %one = insertelement <32 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <32 x i1> %one, <32 x i1> poison, <32 x i32> zeroinitializer
-  %load = call <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr %ptr, i32 %stride, <32 x i1> %allones, i32 %evl)
+  %load = call <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr %ptr, i32 %stride, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %load
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
index 6c4960bd407842..dee422a4c17d1a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
@@ -376,9 +376,7 @@ define void @strided_vpstore_v2i8_allones_mask(<2 x i8> %val, ptr %ptr, i32 sign
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x i1> poison, i1 true, i32 0
-  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v2i8.p0.i32(<2 x i8> %val, ptr %ptr, i32 %stride, <2 x i1> %b, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v2i8.p0.i32(<2 x i8> %val, ptr %ptr, i32 %stride, <2 x i1> splat (i1 true), i32 %evl)
   ret void
 }
 
@@ -399,9 +397,7 @@ define void @strided_vpstore_v3f32_allones_mask(<3 x float> %v, ptr %ptr, i32 si
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %one = insertelement <3 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <3 x i1> %one, <3 x i1> poison, <3 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v3f32.p0.i32(<3 x float> %v, ptr %ptr, i32 %stride, <3 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v3f32.p0.i32(<3 x float> %v, ptr %ptr, i32 %stride, <3 x i1> splat (i1 true), i32 %evl)
   ret void
 }
 
@@ -454,9 +450,7 @@ define void @strided_store_v32f64_allones_mask(<32 x double> %v, ptr %ptr, i32 s
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v16, (a0), a1
 ; CHECK-NEXT:    ret
-  %one = insertelement <32 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <32 x i1> %one, <32 x i1> poison, <32 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.v32f64.p0.i32(<32 x double> %v, ptr %ptr, i32 %stride, <32 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.v32f64.p0.i32(<32 x double> %v, ptr %ptr, i32 %stride, <32 x i1> splat (i1 true), i32 %evl)
   ret void
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll
index 41f956bec9030d..adfb26cd31060c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll
@@ -25,7 +25,7 @@ define <4 x half> @vuitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -52,7 +52,7 @@ define <4 x float> @vuitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -79,6 +79,6 @@ define <4 x double> @vuitofp_v4f64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
index 9047400427408f..698c48bc55650c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
@@ -73,7 +73,7 @@ define <4 x half> @vuitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -111,7 +111,7 @@ define <4 x half> @vuitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -151,7 +151,7 @@ define <4 x half> @vuitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -193,7 +193,7 @@ define <4 x half> @vuitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -217,7 +217,7 @@ define <4 x float> @vuitofp_v4f32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -241,7 +241,7 @@ define <4 x float> @vuitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -263,7 +263,7 @@ define <4 x float> @vuitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -287,7 +287,7 @@ define <4 x float> @vuitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -311,7 +311,7 @@ define <4 x double> @vuitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vzext.vf4 v10, v8
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -335,7 +335,7 @@ define <4 x double> @vuitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -359,7 +359,7 @@ define <4 x double> @vuitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -381,7 +381,7 @@ define <4 x double> @vuitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -430,6 +430,6 @@ define <32 x double> @vuitofp_v32f64_v32i64_unmasked(<32 x i64> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.xu.v v16, v16
 ; CHECK-NEXT:    ret
-  %v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  %v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
index 954edf872aff8d..70b547759938f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
@@ -29,9 +29,7 @@ define <8 x i8> @vaaddu_vx_v8i8_floor(<8 x i8> %x, i8 %y) {
   %ysplat = shufflevector <8 x i8> %yhead, <8 x i8> poison, <8 x i32> zeroinitializer
   %yzv = zext <8 x i8> %ysplat to <8 x i16>
   %add = add nuw nsw <8 x i16> %xzv, %yzv
-  %one = insertelement <8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <8 x i16> %one, <8 x i16> poison, <8 x i32> zeroinitializer
-  %div = lshr <8 x i16> %add, %splat
+  %div = lshr <8 x i16> %add, splat (i16 1)
   %ret = trunc <8 x i16> %div to <8 x i8>
   ret <8 x i8> %ret
 }
@@ -109,9 +107,7 @@ define <8 x i16> @vaaddu_vx_v8i16_floor(<8 x i16> %x, i16 %y) {
   %ysplat = shufflevector <8 x i16> %yhead, <8 x i16> poison, <8 x i32> zeroinitializer
   %yzv = zext <8 x i16> %ysplat to <8 x i32>
   %add = add nuw nsw <8 x i32> %xzv, %yzv
-  %one = insertelement <8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <8 x i32> %one, <8 x i32> poison, <8 x i32> zeroinitializer
-  %div = lshr <8 x i32> %add, %splat
+  %div = lshr <8 x i32> %add, splat (i32 1)
   %ret = trunc <8 x i32> %div to <8 x i16>
   ret <8 x i16> %ret
 }
@@ -143,9 +139,7 @@ define <8 x i32> @vaaddu_vx_v8i32_floor(<8 x i32> %x, i32 %y) {
   %ysplat = shufflevector <8 x i32> %yhead, <8 x i32> poison, <8 x i32> zeroinitializer
   %yzv = zext <8 x i32> %ysplat to <8 x i64>
   %add = add nuw nsw <8 x i64> %xzv, %yzv
-  %one = insertelement <8 x i64> poison, i64 1, i64 0
-  %splat = shufflevector <8 x i64> %one, <8 x i64> poison, <8 x i32> zeroinitializer
-  %div = lshr <8 x i64> %add, %splat
+  %div = lshr <8 x i64> %add, splat (i64 1)
   %ret = trunc <8 x i64> %div to <8 x i32>
   ret <8 x i32> %ret
 }
@@ -212,9 +206,7 @@ define <8 x i64> @vaaddu_vx_v8i64_floor(<8 x i64> %x, i64 %y) {
   %ysplat = shufflevector <8 x i64> %yhead, <8 x i64> poison, <8 x i32> zeroinitializer
   %yzv = zext <8 x i64> %ysplat to <8 x i128>
   %add = add nuw nsw <8 x i128> %xzv, %yzv
-  %one = insertelement <8 x i128> poison, i128 1, i128 0
-  %splat = shufflevector <8 x i128> %one, <8 x i128> poison, <8 x i32> zeroinitializer
-  %div = lshr <8 x i128> %add, %splat
+  %div = lshr <8 x i128> %add, splat (i128 1)
   %ret = trunc <8 x i128> %div to <8 x i64>
   ret <8 x i64> %ret
 }
@@ -248,9 +240,7 @@ define <8 x i8> @vaaddu_vx_v8i8_ceil(<8 x i8> %x, i8 %y) {
   %yzv = zext <8 x i8> %ysplat to <8 x i16>
   %add = add nuw nsw <8 x i16> %xzv, %yzv
   %add1 = add nuw nsw <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  %one = insertelement <8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <8 x i16> %one, <8 x i16> poison, <8 x i32> zeroinitializer
-  %div = lshr <8 x i16> %add1, %splat
+  %div = lshr <8 x i16> %add1, splat (i16 1)
   %ret = trunc <8 x i16> %div to <8 x i8>
   ret <8 x i8> %ret
 }
@@ -359,9 +349,7 @@ define <8 x i16> @vaaddu_vx_v8i16_ceil(<8 x i16> %x, i16 %y) {
   %yzv = zext <8 x i16> %ysplat to <8 x i32>
   %add = add nuw nsw <8 x i32> %xzv, %yzv
   %add1 = add nuw nsw <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
-  %one = insertelement <8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <8 x i32> %one, <8 x i32> poison, <8 x i32> zeroinitializer
-  %div = lshr <8 x i32> %add1, %splat
+  %div = lshr <8 x i32> %add1, splat (i32 1)
   %ret = trunc <8 x i32> %div to <8 x i16>
   ret <8 x i16> %ret
 }
@@ -395,9 +383,7 @@ define <8 x i32> @vaaddu_vx_v8i32_ceil(<8 x i32> %x, i32 %y) {
   %yzv = zext <8 x i32> %ysplat to <8 x i64>
   %add = add nuw nsw <8 x i64> %xzv, %yzv
   %add1 = add nuw nsw <8 x i64> %add, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
-  %one = insertelement <8 x i64> poison, i64 1, i64 0
-  %splat = shufflevector <8 x i64> %one, <8 x i64> poison, <8 x i32> zeroinitializer
-  %div = lshr <8 x i64> %add1, %splat
+  %div = lshr <8 x i64> %add1, splat (i64 1)
   %ret = trunc <8 x i64> %div to <8 x i32>
   ret <8 x i32> %ret
 }
@@ -467,9 +453,7 @@ define <8 x i64> @vaaddu_vx_v8i64_ceil(<8 x i64> %x, i64 %y) {
   %yzv = zext <8 x i64> %ysplat to <8 x i128>
   %add = add nuw nsw <8 x i128> %xzv, %yzv
   %add1 = add nuw nsw <8 x i128> %add, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
-  %one = insertelement <8 x i128> poison, i128 1, i128 0
-  %splat = shufflevector <8 x i128> %one, <8 x i128> poison, <8 x i32> zeroinitializer
-  %div = lshr <8 x i128> %add1, %splat
+  %div = lshr <8 x i128> %add1, splat (i128 1)
   %ret = trunc <8 x i128> %div to <8 x i64>
   ret <8 x i64> %ret
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index e15253b67275cc..2c62cbd583d00c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -34,9 +34,7 @@ define <2 x i8> @vadd_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -60,9 +58,7 @@ define <2 x i8> @vadd_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -72,9 +68,7 @@ define <2 x i8> @vadd_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -84,11 +78,7 @@ define <2 x i8> @vadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -110,9 +100,7 @@ define <4 x i8> @vadd_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -148,9 +136,7 @@ define <4 x i8> @vadd_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -160,9 +146,7 @@ define <4 x i8> @vadd_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -172,11 +156,7 @@ define <4 x i8> @vadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -198,9 +178,7 @@ define <5 x i8> @vadd_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -224,9 +202,7 @@ define <5 x i8> @vadd_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -236,9 +212,7 @@ define <5 x i8> @vadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -248,11 +222,7 @@ define <5 x i8> @vadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -274,9 +244,7 @@ define <8 x i8> @vadd_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -300,9 +268,7 @@ define <8 x i8> @vadd_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -312,9 +278,7 @@ define <8 x i8> @vadd_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -324,11 +288,7 @@ define <8 x i8> @vadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -350,9 +310,7 @@ define <16 x i8> @vadd_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -376,9 +334,7 @@ define <16 x i8> @vadd_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -388,9 +344,7 @@ define <16 x i8> @vadd_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -400,11 +354,7 @@ define <16 x i8> @vadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -431,9 +381,7 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -455,11 +403,7 @@ define <256 x i8> @vadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v16, v16, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -476,9 +420,7 @@ define <256 x i8> @vadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -491,9 +433,7 @@ define <256 x i8> @vadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -515,9 +455,7 @@ define <2 x i16> @vadd_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -541,9 +479,7 @@ define <2 x i16> @vadd_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -553,9 +489,7 @@ define <2 x i16> @vadd_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -565,11 +499,7 @@ define <2 x i16> @vadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -591,9 +521,7 @@ define <4 x i16> @vadd_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -617,9 +545,7 @@ define <4 x i16> @vadd_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -629,9 +555,7 @@ define <4 x i16> @vadd_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -641,11 +565,7 @@ define <4 x i16> @vadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -667,9 +587,7 @@ define <8 x i16> @vadd_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -693,9 +611,7 @@ define <8 x i16> @vadd_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -705,9 +621,7 @@ define <8 x i16> @vadd_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -717,11 +631,7 @@ define <8 x i16> @vadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -743,9 +653,7 @@ define <16 x i16> @vadd_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -769,9 +677,7 @@ define <16 x i16> @vadd_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -781,9 +687,7 @@ define <16 x i16> @vadd_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -793,11 +697,7 @@ define <16 x i16> @vadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -819,9 +719,7 @@ define <2 x i32> @vadd_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -845,9 +743,7 @@ define <2 x i32> @vadd_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -857,9 +753,7 @@ define <2 x i32> @vadd_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -869,11 +763,7 @@ define <2 x i32> @vadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -895,9 +785,7 @@ define <4 x i32> @vadd_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -921,9 +809,7 @@ define <4 x i32> @vadd_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -933,9 +819,7 @@ define <4 x i32> @vadd_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -945,11 +829,7 @@ define <4 x i32> @vadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -971,9 +851,7 @@ define <8 x i32> @vadd_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -997,9 +875,7 @@ define <8 x i32> @vadd_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1009,9 +885,7 @@ define <8 x i32> @vadd_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1021,11 +895,7 @@ define <8 x i32> @vadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1047,9 +917,7 @@ define <16 x i32> @vadd_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1073,9 +941,7 @@ define <16 x i32> @vadd_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1085,9 +951,7 @@ define <16 x i32> @vadd_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1097,11 +961,7 @@ define <16 x i32> @vadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1123,9 +983,7 @@ define <2 x i64> @vadd_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1177,9 +1035,7 @@ define <2 x i64> @vadd_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1189,9 +1045,7 @@ define <2 x i64> @vadd_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1201,11 +1055,7 @@ define <2 x i64> @vadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1227,9 +1077,7 @@ define <4 x i64> @vadd_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1281,9 +1129,7 @@ define <4 x i64> @vadd_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1293,9 +1139,7 @@ define <4 x i64> @vadd_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1305,11 +1149,7 @@ define <4 x i64> @vadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1331,9 +1171,7 @@ define <8 x i64> @vadd_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1385,9 +1223,7 @@ define <8 x i64> @vadd_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1397,9 +1233,7 @@ define <8 x i64> @vadd_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1409,11 +1243,7 @@ define <8 x i64> @vadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1435,9 +1265,7 @@ define <16 x i64> @vadd_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1489,9 +1317,7 @@ define <16 x i64> @vadd_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1501,9 +1327,7 @@ define <16 x i64> @vadd_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1513,11 +1337,7 @@ define <16 x i64> @vadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1529,16 +1349,16 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-LABEL: vadd_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1570,24 +1390,22 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vadd_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1615,11 +1433,7 @@ define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vadd.vi v16, v16, -1
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1640,9 +1454,7 @@ define <32 x i64> @vadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vsetivli zero, 12, e64, m8, ta, ma
 ; RV64-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1671,8 +1483,6 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
index 80b62a7a0aae6f..507cf5cc6b80cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
@@ -34,9 +34,7 @@ define <2 x i8> @vand_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -72,9 +70,7 @@ define <2 x i8> @vand_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -86,9 +82,7 @@ define <2 x i8> @vand_vx_v2i8_unmasked_commute(<2 x i8> %va, i8 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -98,9 +92,7 @@ define <2 x i8> @vand_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> splat (i8 4), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -110,11 +102,7 @@ define <2 x i8> @vand_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> splat (i8 4), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -136,9 +124,7 @@ define <4 x i8> @vand_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -162,9 +148,7 @@ define <4 x i8> @vand_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -174,9 +158,7 @@ define <4 x i8> @vand_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> splat (i8 4), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -186,11 +168,7 @@ define <4 x i8> @vand_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> splat (i8 4), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -212,9 +190,7 @@ define <8 x i8> @vand_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -238,9 +214,7 @@ define <8 x i8> @vand_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -250,9 +224,7 @@ define <8 x i8> @vand_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -262,11 +234,7 @@ define <8 x i8> @vand_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -288,9 +256,7 @@ define <16 x i8> @vand_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -314,9 +280,7 @@ define <16 x i8> @vand_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -326,9 +290,7 @@ define <16 x i8> @vand_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> splat (i8 4), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -338,11 +300,7 @@ define <16 x i8> @vand_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> splat (i8 4), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -364,9 +322,7 @@ define <2 x i16> @vand_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -390,9 +346,7 @@ define <2 x i16> @vand_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -402,9 +356,7 @@ define <2 x i16> @vand_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> splat (i16 4), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -414,11 +366,7 @@ define <2 x i16> @vand_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> splat (i16 4), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -440,9 +388,7 @@ define <4 x i16> @vand_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -466,9 +412,7 @@ define <4 x i16> @vand_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -478,9 +422,7 @@ define <4 x i16> @vand_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> splat (i16 4), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -490,11 +432,7 @@ define <4 x i16> @vand_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> splat (i16 4), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -516,9 +454,7 @@ define <8 x i16> @vand_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -542,9 +478,7 @@ define <8 x i16> @vand_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -554,9 +488,7 @@ define <8 x i16> @vand_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> splat (i16 4), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -566,11 +498,7 @@ define <8 x i16> @vand_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> splat (i16 4), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -592,9 +520,7 @@ define <16 x i16> @vand_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -618,9 +544,7 @@ define <16 x i16> @vand_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -630,9 +554,7 @@ define <16 x i16> @vand_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> splat (i16 4), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -642,11 +564,7 @@ define <16 x i16> @vand_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> splat (i16 4), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -668,9 +586,7 @@ define <2 x i32> @vand_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -694,9 +610,7 @@ define <2 x i32> @vand_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -706,9 +620,7 @@ define <2 x i32> @vand_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> splat (i32 4), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -718,11 +630,7 @@ define <2 x i32> @vand_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> splat (i32 4), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -744,9 +652,7 @@ define <4 x i32> @vand_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -770,9 +676,7 @@ define <4 x i32> @vand_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -782,9 +686,7 @@ define <4 x i32> @vand_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> splat (i32 4), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -794,11 +696,7 @@ define <4 x i32> @vand_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> splat (i32 4), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -820,9 +718,7 @@ define <8 x i32> @vand_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -846,9 +742,7 @@ define <8 x i32> @vand_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -858,9 +752,7 @@ define <8 x i32> @vand_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -870,11 +762,7 @@ define <8 x i32> @vand_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -896,9 +784,7 @@ define <16 x i32> @vand_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -922,9 +808,7 @@ define <16 x i32> @vand_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -934,9 +818,7 @@ define <16 x i32> @vand_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> splat (i32 4), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -946,11 +828,7 @@ define <16 x i32> @vand_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> splat (i32 4), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -972,9 +850,7 @@ define <2 x i64> @vand_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1026,9 +902,7 @@ define <2 x i64> @vand_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1038,9 +912,7 @@ define <2 x i64> @vand_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> splat (i64 4), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1050,11 +922,7 @@ define <2 x i64> @vand_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> splat (i64 4), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1076,9 +944,7 @@ define <4 x i64> @vand_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1130,9 +996,7 @@ define <4 x i64> @vand_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1142,9 +1006,7 @@ define <4 x i64> @vand_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> splat (i64 4), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1154,11 +1016,7 @@ define <4 x i64> @vand_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> splat (i64 4), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1180,9 +1038,7 @@ define <8 x i64> @vand_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1234,9 +1090,7 @@ define <8 x i64> @vand_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1246,9 +1100,7 @@ define <8 x i64> @vand_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1258,11 +1110,7 @@ define <8 x i64> @vand_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1284,9 +1132,7 @@ define <11 x i64> @vand_vv_v11i64_unmasked(<11 x i64> %va, <11 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <11 x i1> poison, i1 true, i32 0
-  %m = shufflevector <11 x i1> %head, <11 x i1> poison, <11 x i32> zeroinitializer
-  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl)
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> splat (i1 true), i32 %evl)
   ret <11 x i64> %v
 }
 
@@ -1338,9 +1184,7 @@ define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <11 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <11 x i64> %elt.head, <11 x i64> poison, <11 x i32> zeroinitializer
-  %head = insertelement <11 x i1> poison, i1 true, i32 0
-  %m = shufflevector <11 x i1> %head, <11 x i1> poison, <11 x i32> zeroinitializer
-  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl)
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> splat (i1 true), i32 %evl)
   ret <11 x i64> %v
 }
 
@@ -1350,9 +1194,7 @@ define <11 x i64> @vand_vi_v11i64(<11 x i64> %va, <11 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <11 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <11 x i64> %elt.head, <11 x i64> poison, <11 x i32> zeroinitializer
-  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl)
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> splat (i64 4), <11 x i1> %m, i32 %evl)
   ret <11 x i64> %v
 }
 
@@ -1362,11 +1204,7 @@ define <11 x i64> @vand_vi_v11i64_unmasked(<11 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <11 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <11 x i64> %elt.head, <11 x i64> poison, <11 x i32> zeroinitializer
-  %head = insertelement <11 x i1> poison, i1 true, i32 0
-  %m = shufflevector <11 x i1> %head, <11 x i1> poison, <11 x i32> zeroinitializer
-  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl)
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> splat (i64 4), <11 x i1> splat (i1 true), i32 %evl)
   ret <11 x i64> %v
 }
 
@@ -1388,9 +1226,7 @@ define <16 x i64> @vand_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1442,9 +1278,7 @@ define <16 x i64> @vand_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1454,9 +1288,7 @@ define <16 x i64> @vand_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> splat (i64 4), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1466,10 +1298,6 @@ define <16 x i64> @vand_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> splat (i64 4), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
index f83968d54b2c98..01b07b4081e6d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
@@ -22,9 +22,7 @@ define <2 x half> @vfsgnj_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -46,9 +44,7 @@ define <4 x half> @vfsgnj_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -70,9 +66,7 @@ define <8 x half> @vfsgnj_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -94,9 +88,7 @@ define <16 x half> @vfsgnj_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -118,9 +110,7 @@ define <2 x float> @vfsgnj_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -142,9 +132,7 @@ define <4 x float> @vfsgnj_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -166,9 +154,7 @@ define <8 x float> @vfsgnj_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -190,9 +176,7 @@ define <16 x float> @vfsgnj_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -214,9 +198,7 @@ define <2 x double> @vfsgnj_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -238,9 +220,7 @@ define <4 x double> @vfsgnj_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -262,9 +242,7 @@ define <8 x double> @vfsgnj_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -286,9 +264,7 @@ define <15 x double> @vfsgnj_vv_v15f64_unmasked(<15 x double> %va, <15 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -310,9 +286,7 @@ define <16 x double> @vfsgnj_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -384,8 +358,6 @@ define <32 x double> @vfsgnj_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v16, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
index 435d3ff1746d19..29f8eaba900527 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
@@ -39,9 +39,7 @@ define <2 x i8> @vdiv_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -65,9 +63,7 @@ define <2 x i8> @vdiv_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -89,9 +85,7 @@ define <4 x i8> @vdiv_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -115,9 +109,7 @@ define <4 x i8> @vdiv_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -151,9 +143,7 @@ define <8 x i8> @vdiv_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -177,9 +167,7 @@ define <8 x i8> @vdiv_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -201,9 +189,7 @@ define <16 x i8> @vdiv_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -227,9 +213,7 @@ define <16 x i8> @vdiv_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -251,9 +235,7 @@ define <2 x i16> @vdiv_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -277,9 +259,7 @@ define <2 x i16> @vdiv_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -301,9 +281,7 @@ define <4 x i16> @vdiv_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -327,9 +305,7 @@ define <4 x i16> @vdiv_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -351,9 +327,7 @@ define <8 x i16> @vdiv_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -377,9 +351,7 @@ define <8 x i16> @vdiv_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -401,9 +373,7 @@ define <16 x i16> @vdiv_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -427,9 +397,7 @@ define <16 x i16> @vdiv_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -451,9 +419,7 @@ define <2 x i32> @vdiv_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -477,9 +443,7 @@ define <2 x i32> @vdiv_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -501,9 +465,7 @@ define <4 x i32> @vdiv_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -527,9 +489,7 @@ define <4 x i32> @vdiv_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -551,9 +511,7 @@ define <8 x i32> @vdiv_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -577,9 +535,7 @@ define <8 x i32> @vdiv_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -601,9 +557,7 @@ define <16 x i32> @vdiv_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -627,9 +581,7 @@ define <16 x i32> @vdiv_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -651,9 +603,7 @@ define <2 x i64> @vdiv_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -705,9 +655,7 @@ define <2 x i64> @vdiv_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -729,9 +677,7 @@ define <4 x i64> @vdiv_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -783,9 +729,7 @@ define <4 x i64> @vdiv_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -807,9 +751,7 @@ define <8 x i64> @vdiv_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -861,9 +803,7 @@ define <8 x i64> @vdiv_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -885,9 +825,7 @@ define <16 x i64> @vdiv_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -939,8 +877,6 @@ define <16 x i64> @vdiv_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
index 4e78c5cde2faf4..3f8eb0ff276b7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
@@ -38,9 +38,7 @@ define <2 x i8> @vdivu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -64,9 +62,7 @@ define <2 x i8> @vdivu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -88,9 +84,7 @@ define <4 x i8> @vdivu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -114,9 +108,7 @@ define <4 x i8> @vdivu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -150,9 +142,7 @@ define <8 x i8> @vdivu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -176,9 +166,7 @@ define <8 x i8> @vdivu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -200,9 +188,7 @@ define <16 x i8> @vdivu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -226,9 +212,7 @@ define <16 x i8> @vdivu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -250,9 +234,7 @@ define <2 x i16> @vdivu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -276,9 +258,7 @@ define <2 x i16> @vdivu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -300,9 +280,7 @@ define <4 x i16> @vdivu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -326,9 +304,7 @@ define <4 x i16> @vdivu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -350,9 +326,7 @@ define <8 x i16> @vdivu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -376,9 +350,7 @@ define <8 x i16> @vdivu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -400,9 +372,7 @@ define <16 x i16> @vdivu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -426,9 +396,7 @@ define <16 x i16> @vdivu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -450,9 +418,7 @@ define <2 x i32> @vdivu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -476,9 +442,7 @@ define <2 x i32> @vdivu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -500,9 +464,7 @@ define <4 x i32> @vdivu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -526,9 +488,7 @@ define <4 x i32> @vdivu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -550,9 +510,7 @@ define <8 x i32> @vdivu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -576,9 +534,7 @@ define <8 x i32> @vdivu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -600,9 +556,7 @@ define <16 x i32> @vdivu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -626,9 +580,7 @@ define <16 x i32> @vdivu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -650,9 +602,7 @@ define <2 x i64> @vdivu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -704,9 +654,7 @@ define <2 x i64> @vdivu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -728,9 +676,7 @@ define <4 x i64> @vdivu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -782,9 +728,7 @@ define <4 x i64> @vdivu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -806,9 +750,7 @@ define <8 x i64> @vdivu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -860,9 +802,7 @@ define <8 x i64> @vdivu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -884,9 +824,7 @@ define <16 x i64> @vdivu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -938,8 +876,6 @@ define <16 x i64> @vdivu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
index a30f682d5cf1e2..f32e2bbf37946c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
@@ -46,9 +46,7 @@ define <2 x half> @vfabs_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fabs.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fabs.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -90,9 +88,7 @@ define <4 x half> @vfabs_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fabs.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fabs.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -134,9 +130,7 @@ define <8 x half> @vfabs_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fabs.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fabs.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -178,9 +172,7 @@ define <16 x half> @vfabs_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fabs.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fabs.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -202,9 +194,7 @@ define <2 x float> @vfabs_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fabs.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fabs.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -226,9 +216,7 @@ define <4 x float> @vfabs_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -250,9 +238,7 @@ define <8 x float> @vfabs_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fabs.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fabs.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -274,9 +260,7 @@ define <16 x float> @vfabs_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fabs.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fabs.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -298,9 +282,7 @@ define <2 x double> @vfabs_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fabs.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fabs.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -322,9 +304,7 @@ define <4 x double> @vfabs_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fabs.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fabs.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -346,9 +326,7 @@ define <8 x double> @vfabs_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fabs.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fabs.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -370,9 +348,7 @@ define <15 x double> @vfabs_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.fabs.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.fabs.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -394,9 +370,7 @@ define <16 x double> @vfabs_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fabs.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fabs.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -445,8 +419,6 @@ define <32 x double> @vfabs_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
index fa01057b21203e..f023c760f14a74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
@@ -48,9 +48,7 @@ define <2 x half> @vfadd_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -106,9 +104,7 @@ define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -175,9 +171,7 @@ define <4 x half> @vfadd_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -233,9 +227,7 @@ define <4 x half> @vfadd_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -279,9 +271,7 @@ define <8 x half> @vfadd_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -337,9 +327,7 @@ define <8 x half> @vfadd_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -383,9 +371,7 @@ define <16 x half> @vfadd_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -441,9 +427,7 @@ define <16 x half> @vfadd_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -465,9 +449,7 @@ define <2 x float> @vfadd_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -503,9 +485,7 @@ define <2 x float> @vfadd_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -517,9 +497,7 @@ define <2 x float> @vfadd_vf_v2f32_unmasked_commute(<2 x float> %va, float %b, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -541,9 +519,7 @@ define <4 x float> @vfadd_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -567,9 +543,7 @@ define <4 x float> @vfadd_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -591,9 +565,7 @@ define <8 x float> @vfadd_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -617,9 +589,7 @@ define <8 x float> @vfadd_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -641,9 +611,7 @@ define <16 x float> @vfadd_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -667,9 +635,7 @@ define <16 x float> @vfadd_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -691,9 +657,7 @@ define <2 x double> @vfadd_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -717,9 +681,7 @@ define <2 x double> @vfadd_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -741,9 +703,7 @@ define <4 x double> @vfadd_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -767,9 +727,7 @@ define <4 x double> @vfadd_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -791,9 +749,7 @@ define <8 x double> @vfadd_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -817,9 +773,7 @@ define <8 x double> @vfadd_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -841,9 +795,7 @@ define <16 x double> @vfadd_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -867,8 +819,6 @@ define <16 x double> @vfadd_vf_v16f64_unmasked(<16 x double> %va, double %b, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll
index 8eefae291d3435..09b9e7ce4c53db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll
@@ -26,9 +26,7 @@ define <2 x i1> @isnan_v2f16_unmasked(<2 x half> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> %x, i32 3, <2 x i1> %m, i32 %evl)  ; nan
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> %x, i32 3, <2 x i1> splat (i1 true), i32 %evl)  ; nan
   ret <2 x i1> %1
 }
 
@@ -54,9 +52,7 @@ define <2 x i1> @isnan_v2f32_unmasked(<2 x float> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float> %x, i32 639, <2 x i1> %m, i32 %evl)
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float> %x, i32 639, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i1> %1
 }
 
@@ -82,9 +78,7 @@ define <4 x i1> @isnan_v4f32_unmasked(<4 x float> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float> %x, i32 3, <4 x i1> %m, i32 %evl)  ; nan
+  %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float> %x, i32 3, <4 x i1> splat (i1 true), i32 %evl)  ; nan
   ret <4 x i1> %1
 }
 
@@ -109,9 +103,7 @@ define <8 x i1> @isnan_v8f32_unmasked(<8 x float> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    li a0, 512
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float> %x, i32 2, <8 x i1> %m, i32 %evl)
+  %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float> %x, i32 2, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i1> %1
 }
 
@@ -136,9 +128,7 @@ define <16 x i1> @isnan_v16f32_unmasked(<16 x float> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    li a0, 256
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-    %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float> %x, i32 1, <16 x i1> %m, i32 %evl)
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float> %x, i32 1, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i1> %1
 }
 
@@ -164,9 +154,7 @@ define <2 x i1> @isnormal_v2f64_unmasked(<2 x double> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double> %x, i32 516, <2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double> %x, i32 516, <2 x i1> splat (i1 true), i32 %evl) ; 0x204 = "inf"
   ret <2 x i1> %1
 }
 
@@ -191,9 +179,7 @@ define <4 x i1> @isposinf_v4f64_unmasked(<4 x double> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    li a0, 128
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f64(<4 x double> %x, i32 512, <4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+  %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f64(<4 x double> %x, i32 512, <4 x i1> splat (i1 true), i32 %evl) ; 0x200 = "+inf"
   ret <4 x i1> %1
 }
 
@@ -216,9 +202,7 @@ define <8 x i1> @isneginf_v8f64_unmasked(<8 x double> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    vfclass.v v8, v8
 ; CHECK-NEXT:    vmseq.vi v0, v8, 1
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 4, <8 x i1> %m, i32 %evl) ; "-inf"
+  %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 4, <8 x i1> splat (i1 true), i32 %evl) ; "-inf"
   ret <8 x i1> %1
 }
 
@@ -245,9 +229,7 @@ define <16 x i1> @isfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl) {
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 504, <16 x i1> %m, i32 %evl) ; 0x1f8 = "finite"
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 504, <16 x i1> splat (i1 true), i32 %evl) ; 0x1f8 = "finite"
   ret <16 x i1> %1
 }
 
@@ -273,9 +255,7 @@ define <16 x i1> @isnegfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl
 ; CHECK-NEXT:    vand.vi v8, v8, 14
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 56, <16 x i1> %m, i32 %evl) ; 0x38 = "-finite"
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 56, <16 x i1> splat (i1 true), i32 %evl) ; 0x38 = "-finite"
   ret <16 x i1> %1
 }
 
@@ -302,9 +282,7 @@ define <16 x i1> @isnotfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 519, <16 x i1> %m, i32 %evl) ; 0x207 = "inf|nan"
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 519, <16 x i1> splat (i1 true), i32 %evl) ; 0x207 = "inf|nan"
   ret <16 x i1> %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
index 8c19e0bae81cfe..9fb8377d5a5ef9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
@@ -48,9 +48,7 @@ define <2 x half> @vfdiv_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -106,9 +104,7 @@ define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -175,9 +171,7 @@ define <4 x half> @vfdiv_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -233,9 +227,7 @@ define <4 x half> @vfdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -279,9 +271,7 @@ define <8 x half> @vfdiv_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -337,9 +327,7 @@ define <8 x half> @vfdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -383,9 +371,7 @@ define <16 x half> @vfdiv_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -441,9 +427,7 @@ define <16 x half> @vfdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -465,9 +449,7 @@ define <2 x float> @vfdiv_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -491,9 +473,7 @@ define <2 x float> @vfdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -515,9 +495,7 @@ define <4 x float> @vfdiv_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -541,9 +519,7 @@ define <4 x float> @vfdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -565,9 +541,7 @@ define <8 x float> @vfdiv_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -591,9 +565,7 @@ define <8 x float> @vfdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -615,9 +587,7 @@ define <16 x float> @vfdiv_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -641,9 +611,7 @@ define <16 x float> @vfdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -665,9 +633,7 @@ define <2 x double> @vfdiv_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -691,9 +657,7 @@ define <2 x double> @vfdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -715,9 +679,7 @@ define <4 x double> @vfdiv_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -741,9 +703,7 @@ define <4 x double> @vfdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -765,9 +725,7 @@ define <8 x double> @vfdiv_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -791,9 +749,7 @@ define <8 x double> @vfdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -815,9 +771,7 @@ define <16 x double> @vfdiv_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -841,8 +795,6 @@ define <16 x double> @vfdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
index d7b89ee054af8c..0574773fb2fd93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -51,9 +51,7 @@ define <2 x half> @vfma_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, <2 x ha
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -111,9 +109,7 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %v
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -160,9 +156,7 @@ define <4 x half> @vfma_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, <4 x ha
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -220,9 +214,7 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %v
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -269,9 +261,7 @@ define <8 x half> @vfma_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, <8 x ha
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v14
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -329,9 +319,7 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %v
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -378,9 +366,7 @@ define <16 x half> @vfma_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, <16
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v20
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -438,9 +424,7 @@ define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -463,9 +447,7 @@ define <2 x float> @vfma_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, <2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -489,9 +471,7 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -514,9 +494,7 @@ define <4 x float> @vfma_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, <4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -540,9 +518,7 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -565,9 +541,7 @@ define <8 x float> @vfma_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, <8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -591,9 +565,7 @@ define <8 x float> @vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -616,9 +588,7 @@ define <16 x float> @vfma_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -642,9 +612,7 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -667,9 +635,7 @@ define <2 x double> @vfma_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -693,9 +659,7 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -718,9 +682,7 @@ define <4 x double> @vfma_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -744,9 +706,7 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -769,9 +729,7 @@ define <8 x double> @vfma_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -795,9 +753,7 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -824,9 +780,7 @@ define <15 x double> @vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.fma.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.fma.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -853,9 +807,7 @@ define <16 x double> @vfma_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -879,9 +831,7 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %vb, <16 x double> %vc, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %vb, <16 x double> %vc, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -1032,8 +982,6 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.fma.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.fma.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll
index 78b83046738c60..2d6e1fd02dee57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll
@@ -16,9 +16,7 @@ define <2 x half> @vfmacc_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c,
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -30,10 +28,8 @@ define <2 x half> @vfmacc_vv_v2f16_unmasked(<2 x half> %a, <2 x half> %b, <2 x h
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %allones, <2 x half> %v, <2 x half> %c, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> splat (i1 -1), <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
 
@@ -46,9 +42,7 @@ define <2 x half> @vfmacc_vf_v2f16(<2 x half> %va, half %b, <2 x half> %c, <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -62,9 +56,7 @@ define <2 x half> @vfmacc_vf_v2f16_commute(<2 x half> %va, half %b, <2 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %va, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %va, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -78,10 +70,8 @@ define <2 x half> @vfmacc_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %allones, <2 x half> %v, <2 x half> %c, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> splat (i1 -1), <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
 
@@ -92,9 +82,7 @@ define <2 x half> @vfmacc_vv_v2f16_ta(<2 x half> %a, <2 x half> %b, <2 x half> %
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -108,9 +96,7 @@ define <2 x half> @vfmacc_vf_v2f16_ta(<2 x half> %va, half %b, <2 x half> %c, <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -124,9 +110,7 @@ define <2 x half> @vfmacc_vf_v2f16_commute_ta(<2 x half> %va, half %b, <2 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %va, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %va, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -143,9 +127,7 @@ define <4 x half> @vfmacc_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c,
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -157,10 +139,8 @@ define <4 x half> @vfmacc_vv_v4f16_unmasked(<4 x half> %a, <4 x half> %b, <4 x h
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %allones, <4 x half> %v, <4 x half> %c, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> splat (i1 -1), <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
 
@@ -173,9 +153,7 @@ define <4 x half> @vfmacc_vf_v4f16(<4 x half> %va, half %b, <4 x half> %c, <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -189,9 +167,7 @@ define <4 x half> @vfmacc_vf_v4f16_commute(<4 x half> %va, half %b, <4 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %va, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %va, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -205,10 +181,8 @@ define <4 x half> @vfmacc_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %allones, <4 x half> %v, <4 x half> %c, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> splat (i1 -1), <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
 
@@ -219,9 +193,7 @@ define <4 x half> @vfmacc_vv_v4f16_ta(<4 x half> %a, <4 x half> %b, <4 x half> %
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -235,9 +207,7 @@ define <4 x half> @vfmacc_vf_v4f16_ta(<4 x half> %va, half %b, <4 x half> %c, <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -251,9 +221,7 @@ define <4 x half> @vfmacc_vf_v4f16_commute_ta(<4 x half> %va, half %b, <4 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %va, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %va, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -270,9 +238,7 @@ define <8 x half> @vfmacc_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c,
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -284,10 +250,8 @@ define <8 x half> @vfmacc_vv_v8f16_unmasked(<8 x half> %a, <8 x half> %b, <8 x h
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %allones, <8 x half> %v, <8 x half> %c, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> splat (i1 -1), <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
 
@@ -300,9 +264,7 @@ define <8 x half> @vfmacc_vf_v8f16(<8 x half> %va, half %b, <8 x half> %c, <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -316,9 +278,7 @@ define <8 x half> @vfmacc_vf_v8f16_commute(<8 x half> %va, half %b, <8 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %va, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %va, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -332,10 +292,8 @@ define <8 x half> @vfmacc_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %allones, <8 x half> %v, <8 x half> %c, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> splat (i1 -1), <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
 
@@ -346,9 +304,7 @@ define <8 x half> @vfmacc_vv_v8f16_ta(<8 x half> %a, <8 x half> %b, <8 x half> %
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -362,9 +318,7 @@ define <8 x half> @vfmacc_vf_v8f16_ta(<8 x half> %va, half %b, <8 x half> %c, <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -378,9 +332,7 @@ define <8 x half> @vfmacc_vf_v8f16_commute_ta(<8 x half> %va, half %b, <8 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %va, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %va, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -397,9 +349,7 @@ define <16 x half> @vfmacc_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half>
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -411,10 +361,8 @@ define <16 x half> @vfmacc_vv_v16f16_unmasked(<16 x half> %a, <16 x half> %b, <1
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %allones, <16 x half> %v, <16 x half> %c, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> splat (i1 -1), <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
 
@@ -427,9 +375,7 @@ define <16 x half> @vfmacc_vf_v16f16(<16 x half> %va, half %b, <16 x half> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -443,9 +389,7 @@ define <16 x half> @vfmacc_vf_v16f16_commute(<16 x half> %va, half %b, <16 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %va, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %va, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -459,10 +403,8 @@ define <16 x half> @vfmacc_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %allones, <16 x half> %v, <16 x half> %c, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> splat (i1 -1), <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
 
@@ -473,9 +415,7 @@ define <16 x half> @vfmacc_vv_v16f16_ta(<16 x half> %a, <16 x half> %b, <16 x ha
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -489,9 +429,7 @@ define <16 x half> @vfmacc_vf_v16f16_ta(<16 x half> %va, half %b, <16 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -505,9 +443,7 @@ define <16 x half> @vfmacc_vf_v16f16_commute_ta(<16 x half> %va, half %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %va, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %va, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -524,9 +460,7 @@ define <32 x half> @vfmacc_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half>
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -538,10 +472,8 @@ define <32 x half> @vfmacc_vv_v32f16_unmasked(<32 x half> %a, <32 x half> %b, <3
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %allones, <32 x half> %v, <32 x half> %c, i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> splat (i1 -1), <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
 
@@ -554,9 +486,7 @@ define <32 x half> @vfmacc_vf_v32f16(<32 x half> %va, half %b, <32 x half> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -570,9 +500,7 @@ define <32 x half> @vfmacc_vf_v32f16_commute(<32 x half> %va, half %b, <32 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %va, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %va, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -586,10 +514,8 @@ define <32 x half> @vfmacc_vf_v32f16_unmasked(<32 x half> %va, half %b, <32 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %allones, <32 x half> %v, <32 x half> %c, i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> splat (i1 -1), <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
 
@@ -600,9 +526,7 @@ define <32 x half> @vfmacc_vv_v32f16_ta(<32 x half> %a, <32 x half> %b, <32 x ha
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -616,9 +540,7 @@ define <32 x half> @vfmacc_vf_v32f16_ta(<32 x half> %va, half %b, <32 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -632,9 +554,7 @@ define <32 x half> @vfmacc_vf_v32f16_commute_ta(<32 x half> %va, half %b, <32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %va, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %va, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -651,9 +571,7 @@ define <2 x float> @vfmacc_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float>
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -665,10 +583,8 @@ define <2 x float> @vfmacc_vv_v2f32_unmasked(<2 x float> %a, <2 x float> %b, <2
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %allones, <2 x float> %v, <2 x float> %c, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> splat (i1 -1), <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
 
@@ -681,9 +597,7 @@ define <2 x float> @vfmacc_vf_v2f32(<2 x float> %va, float %b, <2 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -697,9 +611,7 @@ define <2 x float> @vfmacc_vf_v2f32_commute(<2 x float> %va, float %b, <2 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %va, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %va, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -713,10 +625,8 @@ define <2 x float> @vfmacc_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %allones, <2 x float> %v, <2 x float> %c, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> splat (i1 -1), <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
 
@@ -727,9 +637,7 @@ define <2 x float> @vfmacc_vv_v2f32_ta(<2 x float> %a, <2 x float> %b, <2 x floa
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -743,9 +651,7 @@ define <2 x float> @vfmacc_vf_v2f32_ta(<2 x float> %va, float %b, <2 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -759,9 +665,7 @@ define <2 x float> @vfmacc_vf_v2f32_commute_ta(<2 x float> %va, float %b, <2 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %va, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %va, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -778,9 +682,7 @@ define <4 x float> @vfmacc_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -792,10 +694,8 @@ define <4 x float> @vfmacc_vv_v4f32_unmasked(<4 x float> %a, <4 x float> %b, <4
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %allones, <4 x float> %v, <4 x float> %c, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> splat (i1 -1), <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
 
@@ -808,9 +708,7 @@ define <4 x float> @vfmacc_vf_v4f32(<4 x float> %va, float %b, <4 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -824,9 +722,7 @@ define <4 x float> @vfmacc_vf_v4f32_commute(<4 x float> %va, float %b, <4 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %va, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %va, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -840,10 +736,8 @@ define <4 x float> @vfmacc_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %allones, <4 x float> %v, <4 x float> %c, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> splat (i1 -1), <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
 
@@ -854,9 +748,7 @@ define <4 x float> @vfmacc_vv_v4f32_ta(<4 x float> %a, <4 x float> %b, <4 x floa
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -870,9 +762,7 @@ define <4 x float> @vfmacc_vf_v4f32_ta(<4 x float> %va, float %b, <4 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -886,9 +776,7 @@ define <4 x float> @vfmacc_vf_v4f32_commute_ta(<4 x float> %va, float %b, <4 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %va, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %va, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -905,9 +793,7 @@ define <8 x float> @vfmacc_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -919,10 +805,8 @@ define <8 x float> @vfmacc_vv_v8f32_unmasked(<8 x float> %a, <8 x float> %b, <8
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %allones, <8 x float> %v, <8 x float> %c, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> splat (i1 -1), <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
 
@@ -935,9 +819,7 @@ define <8 x float> @vfmacc_vf_v8f32(<8 x float> %va, float %b, <8 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -951,9 +833,7 @@ define <8 x float> @vfmacc_vf_v8f32_commute(<8 x float> %va, float %b, <8 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %va, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %va, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -967,10 +847,8 @@ define <8 x float> @vfmacc_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %allones, <8 x float> %v, <8 x float> %c, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> splat (i1 -1), <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
 
@@ -981,9 +859,7 @@ define <8 x float> @vfmacc_vv_v8f32_ta(<8 x float> %a, <8 x float> %b, <8 x floa
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -997,9 +873,7 @@ define <8 x float> @vfmacc_vf_v8f32_ta(<8 x float> %va, float %b, <8 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1013,9 +887,7 @@ define <8 x float> @vfmacc_vf_v8f32_commute_ta(<8 x float> %va, float %b, <8 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %va, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %va, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1032,9 +904,7 @@ define <16 x float> @vfmacc_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x fl
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1046,10 +916,8 @@ define <16 x float> @vfmacc_vv_v16f32_unmasked(<16 x float> %a, <16 x float> %b,
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %allones, <16 x float> %v, <16 x float> %c, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> splat (i1 -1), <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
 
@@ -1062,9 +930,7 @@ define <16 x float> @vfmacc_vf_v16f32(<16 x float> %va, float %b, <16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1078,9 +944,7 @@ define <16 x float> @vfmacc_vf_v16f32_commute(<16 x float> %va, float %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %va, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %va, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1094,10 +958,8 @@ define <16 x float> @vfmacc_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %allones, <16 x float> %v, <16 x float> %c, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> splat (i1 -1), <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
 
@@ -1108,9 +970,7 @@ define <16 x float> @vfmacc_vv_v16f32_ta(<16 x float> %a, <16 x float> %b, <16 x
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1124,9 +984,7 @@ define <16 x float> @vfmacc_vf_v16f32_ta(<16 x float> %va, float %b, <16 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1140,9 +998,7 @@ define <16 x float> @vfmacc_vf_v16f32_commute_ta(<16 x float> %va, float %b, <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %va, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %va, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1159,9 +1015,7 @@ define <2 x double> @vfmacc_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x doub
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1173,10 +1027,8 @@ define <2 x double> @vfmacc_vv_v2f64_unmasked(<2 x double> %a, <2 x double> %b,
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %allones, <2 x double> %v, <2 x double> %c, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> splat (i1 -1), <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
 
@@ -1189,9 +1041,7 @@ define <2 x double> @vfmacc_vf_v2f64(<2 x double> %va, double %b, <2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1205,9 +1055,7 @@ define <2 x double> @vfmacc_vf_v2f64_commute(<2 x double> %va, double %b, <2 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %va, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %va, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1221,10 +1069,8 @@ define <2 x double> @vfmacc_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %allones, <2 x double> %v, <2 x double> %c, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> splat (i1 -1), <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
 
@@ -1235,9 +1081,7 @@ define <2 x double> @vfmacc_vv_v2f64_ta(<2 x double> %a, <2 x double> %b, <2 x d
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1251,9 +1095,7 @@ define <2 x double> @vfmacc_vf_v2f64_ta(<2 x double> %va, double %b, <2 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1267,9 +1109,7 @@ define <2 x double> @vfmacc_vf_v2f64_commute_ta(<2 x double> %va, double %b, <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %va, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %va, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1286,9 +1126,7 @@ define <4 x double> @vfmacc_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x doub
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1300,10 +1138,8 @@ define <4 x double> @vfmacc_vv_v4f64_unmasked(<4 x double> %a, <4 x double> %b,
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %allones, <4 x double> %v, <4 x double> %c, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> splat (i1 -1), <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
 
@@ -1316,9 +1152,7 @@ define <4 x double> @vfmacc_vf_v4f64(<4 x double> %va, double %b, <4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1332,9 +1166,7 @@ define <4 x double> @vfmacc_vf_v4f64_commute(<4 x double> %va, double %b, <4 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %va, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %va, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1348,10 +1180,8 @@ define <4 x double> @vfmacc_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %allones, <4 x double> %v, <4 x double> %c, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> splat (i1 -1), <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
 
@@ -1362,9 +1192,7 @@ define <4 x double> @vfmacc_vv_v4f64_ta(<4 x double> %a, <4 x double> %b, <4 x d
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1378,9 +1206,7 @@ define <4 x double> @vfmacc_vf_v4f64_ta(<4 x double> %va, double %b, <4 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1394,9 +1220,7 @@ define <4 x double> @vfmacc_vf_v4f64_commute_ta(<4 x double> %va, double %b, <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %va, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %va, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1413,9 +1237,7 @@ define <8 x double> @vfmacc_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x doub
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1427,10 +1249,8 @@ define <8 x double> @vfmacc_vv_v8f64_unmasked(<8 x double> %a, <8 x double> %b,
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %allones, <8 x double> %v, <8 x double> %c, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> splat (i1 -1), <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
 
@@ -1443,9 +1263,7 @@ define <8 x double> @vfmacc_vf_v8f64(<8 x double> %va, double %b, <8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1459,9 +1277,7 @@ define <8 x double> @vfmacc_vf_v8f64_commute(<8 x double> %va, double %b, <8 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %va, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %va, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1475,10 +1291,8 @@ define <8 x double> @vfmacc_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %allones, <8 x double> %v, <8 x double> %c, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> splat (i1 -1), <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
 
@@ -1489,9 +1303,7 @@ define <8 x double> @vfmacc_vv_v8f64_ta(<8 x double> %a, <8 x double> %b, <8 x d
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1505,9 +1317,7 @@ define <8 x double> @vfmacc_vf_v8f64_ta(<8 x double> %va, double %b, <8 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1521,9 +1331,7 @@ define <8 x double> @vfmacc_vf_v8f64_commute_ta(<8 x double> %va, double %b, <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %va, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %va, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
index 86218ddb04bd64..ffa88e28d7dc86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -48,9 +48,7 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.maxnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.maxnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -94,9 +92,7 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.maxnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.maxnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -140,9 +136,7 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.maxnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.maxnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -186,9 +180,7 @@ define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.maxnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.maxnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -210,9 +202,7 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.maxnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.maxnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -234,9 +224,7 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.maxnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.maxnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -258,9 +246,7 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.maxnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.maxnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -282,9 +268,7 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.maxnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.maxnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -306,9 +290,7 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.maxnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.maxnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -330,9 +312,7 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.maxnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.maxnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -354,9 +334,7 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.maxnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.maxnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -378,9 +356,7 @@ define <15 x double> @vfmax_vv_v15f64_unmasked(<15 x double> %va, <15 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.maxnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.maxnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -402,9 +378,7 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.maxnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.maxnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -476,8 +450,6 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v16, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.maxnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.maxnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
index 8b8049ea6c6289..17f851e172f814 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -48,9 +48,7 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.minnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.minnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -94,9 +92,7 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.minnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.minnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -140,9 +136,7 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.minnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.minnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -186,9 +180,7 @@ define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.minnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.minnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -210,9 +202,7 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -234,9 +224,7 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -258,9 +246,7 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -282,9 +268,7 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -306,9 +290,7 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -330,9 +312,7 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -354,9 +334,7 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -378,9 +356,7 @@ define <15 x double> @vfmin_vv_v15f64_unmasked(<15 x double> %va, <15 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -402,9 +378,7 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -476,8 +450,6 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v16, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll
index 3960d061fd66e6..fc6578225aa647 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll
@@ -16,10 +16,8 @@ define <2 x half> @vfmsac_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c,
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -31,11 +29,9 @@ define <2 x half> @vfmsac_vv_v2f16_unmasked(<2 x half> %a, <2 x half> %b, <2 x h
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %allones, <2 x half> %v, <2 x half> %c, i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> splat (i1 -1), <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
 
@@ -48,10 +44,8 @@ define <2 x half> @vfmsac_vf_v2f16(<2 x half> %a, half %b, <2 x half> %c, <2 x i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %vb, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %vb, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -65,10 +59,8 @@ define <2 x half> @vfmsac_vf_v2f16_commute(<2 x half> %a, half %b, <2 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %a, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %a, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -82,11 +74,9 @@ define <2 x half> @vfmsac_vf_v2f16_unmasked(<2 x half> %a, half %b, <2 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %vb, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %allones, <2 x half> %v, <2 x half> %c, i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %vb, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> splat (i1 -1), <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
 
@@ -97,10 +87,8 @@ define <2 x half> @vfmsac_vv_v2f16_ta(<2 x half> %a, <2 x half> %b, <2 x half> %
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -114,10 +102,8 @@ define <2 x half> @vfmsac_vf_v2f16_ta(<2 x half> %a, half %b, <2 x half> %c, <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %vb, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %a, <2 x half> %vb, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -131,10 +117,8 @@ define <2 x half> @vfmsac_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %a, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %a, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -151,10 +135,8 @@ define <4 x half> @vfmsac_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c,
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -166,11 +148,9 @@ define <4 x half> @vfmsac_vv_v4f16_unmasked(<4 x half> %a, <4 x half> %b, <4 x h
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %allones, <4 x half> %v, <4 x half> %c, i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> splat (i1 -1), <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
 
@@ -183,10 +163,8 @@ define <4 x half> @vfmsac_vf_v4f16(<4 x half> %a, half %b, <4 x half> %c, <4 x i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %vb, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %vb, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -200,10 +178,8 @@ define <4 x half> @vfmsac_vf_v4f16_commute(<4 x half> %a, half %b, <4 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %a, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %a, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -217,11 +193,9 @@ define <4 x half> @vfmsac_vf_v4f16_unmasked(<4 x half> %a, half %b, <4 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %vb, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %allones, <4 x half> %v, <4 x half> %c, i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %vb, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> splat (i1 -1), <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
 
@@ -232,10 +206,8 @@ define <4 x half> @vfmsac_vv_v4f16_ta(<4 x half> %a, <4 x half> %b, <4 x half> %
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -249,10 +221,8 @@ define <4 x half> @vfmsac_vf_v4f16_ta(<4 x half> %a, half %b, <4 x half> %c, <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %vb, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %a, <4 x half> %vb, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -266,10 +236,8 @@ define <4 x half> @vfmsac_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %a, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %a, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -286,10 +254,8 @@ define <8 x half> @vfmsac_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c,
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -301,11 +267,9 @@ define <8 x half> @vfmsac_vv_v8f16_unmasked(<8 x half> %a, <8 x half> %b, <8 x h
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %allones, <8 x half> %v, <8 x half> %c, i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> splat (i1 -1), <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
 
@@ -318,10 +282,8 @@ define <8 x half> @vfmsac_vf_v8f16(<8 x half> %a, half %b, <8 x half> %c, <8 x i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %vb, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %vb, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -335,10 +297,8 @@ define <8 x half> @vfmsac_vf_v8f16_commute(<8 x half> %a, half %b, <8 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %a, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %a, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -352,11 +312,9 @@ define <8 x half> @vfmsac_vf_v8f16_unmasked(<8 x half> %a, half %b, <8 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %vb, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %allones, <8 x half> %v, <8 x half> %c, i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %vb, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> splat (i1 -1), <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
 
@@ -367,10 +325,8 @@ define <8 x half> @vfmsac_vv_v8f16_ta(<8 x half> %a, <8 x half> %b, <8 x half> %
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -384,10 +340,8 @@ define <8 x half> @vfmsac_vf_v8f16_ta(<8 x half> %a, half %b, <8 x half> %c, <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %vb, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %a, <8 x half> %vb, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -401,10 +355,8 @@ define <8 x half> @vfmsac_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %a, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %a, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -421,10 +373,8 @@ define <16 x half> @vfmsac_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half>
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -436,11 +386,9 @@ define <16 x half> @vfmsac_vv_v16f16_unmasked(<16 x half> %a, <16 x half> %b, <1
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %allones, <16 x half> %v, <16 x half> %c, i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> splat (i1 -1), <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
 
@@ -453,10 +401,8 @@ define <16 x half> @vfmsac_vf_v16f16(<16 x half> %a, half %b, <16 x half> %c, <1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %vb, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %vb, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -470,10 +416,8 @@ define <16 x half> @vfmsac_vf_v16f16_commute(<16 x half> %a, half %b, <16 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %a, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %a, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -487,11 +431,9 @@ define <16 x half> @vfmsac_vf_v16f16_unmasked(<16 x half> %a, half %b, <16 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %vb, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %allones, <16 x half> %v, <16 x half> %c, i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %vb, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> splat (i1 -1), <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
 
@@ -502,10 +444,8 @@ define <16 x half> @vfmsac_vv_v16f16_ta(<16 x half> %a, <16 x half> %b, <16 x ha
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -519,10 +459,8 @@ define <16 x half> @vfmsac_vf_v16f16_ta(<16 x half> %a, half %b, <16 x half> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %vb, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %a, <16 x half> %vb, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -536,10 +474,8 @@ define <16 x half> @vfmsac_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %a, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %a, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -556,10 +492,8 @@ define <32 x half> @vfmsac_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half>
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -571,11 +505,9 @@ define <32 x half> @vfmsac_vv_v32f16_unmasked(<32 x half> %a, <32 x half> %b, <3
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %allones, <32 x half> %v, <32 x half> %c, i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> splat (i1 -1), <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
 
@@ -588,10 +520,8 @@ define <32 x half> @vfmsac_vf_v32f16(<32 x half> %a, half %b, <32 x half> %c, <3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %vb, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %vb, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -605,10 +535,8 @@ define <32 x half> @vfmsac_vf_v32f16_commute(<32 x half> %a, half %b, <32 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %a, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %a, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -622,11 +550,9 @@ define <32 x half> @vfmsac_vf_v32f16_unmasked(<32 x half> %a, half %b, <32 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %vb, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %allones, <32 x half> %v, <32 x half> %c, i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %vb, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> splat (i1 -1), <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
 
@@ -637,10 +563,8 @@ define <32 x half> @vfmsac_vv_v32f16_ta(<32 x half> %a, <32 x half> %b, <32 x ha
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -654,10 +578,8 @@ define <32 x half> @vfmsac_vf_v32f16_ta(<32 x half> %a, half %b, <32 x half> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %vb, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %a, <32 x half> %vb, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -671,10 +593,8 @@ define <32 x half> @vfmsac_vf_v32f16_commute_ta(<32 x half> %a, half %b, <32 x h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %a, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %a, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -691,10 +611,8 @@ define <2 x float> @vfmsac_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float>
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -706,11 +624,9 @@ define <2 x float> @vfmsac_vv_v2f32_unmasked(<2 x float> %a, <2 x float> %b, <2
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %allones, <2 x float> %v, <2 x float> %c, i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> splat (i1 -1), <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
 
@@ -723,10 +639,8 @@ define <2 x float> @vfmsac_vf_v2f32(<2 x float> %a, float %b, <2 x float> %c, <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %vb, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %vb, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -740,10 +654,8 @@ define <2 x float> @vfmsac_vf_v2f32_commute(<2 x float> %a, float %b, <2 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %a, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %a, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -757,11 +669,9 @@ define <2 x float> @vfmsac_vf_v2f32_unmasked(<2 x float> %a, float %b, <2 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %vb, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %allones, <2 x float> %v, <2 x float> %c, i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %vb, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> splat (i1 -1), <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
 
@@ -772,10 +682,8 @@ define <2 x float> @vfmsac_vv_v2f32_ta(<2 x float> %a, <2 x float> %b, <2 x floa
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -789,10 +697,8 @@ define <2 x float> @vfmsac_vf_v2f32_ta(<2 x float> %a, float %b, <2 x float> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %vb, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %a, <2 x float> %vb, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -806,10 +712,8 @@ define <2 x float> @vfmsac_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %a, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %a, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -826,10 +730,8 @@ define <4 x float> @vfmsac_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -841,11 +743,9 @@ define <4 x float> @vfmsac_vv_v4f32_unmasked(<4 x float> %a, <4 x float> %b, <4
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %allones, <4 x float> %v, <4 x float> %c, i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> splat (i1 -1), <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
 
@@ -858,10 +758,8 @@ define <4 x float> @vfmsac_vf_v4f32(<4 x float> %a, float %b, <4 x float> %c, <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %vb, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %vb, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -875,10 +773,8 @@ define <4 x float> @vfmsac_vf_v4f32_commute(<4 x float> %a, float %b, <4 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %a, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %a, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -892,11 +788,9 @@ define <4 x float> @vfmsac_vf_v4f32_unmasked(<4 x float> %a, float %b, <4 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %vb, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %allones, <4 x float> %v, <4 x float> %c, i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %vb, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> splat (i1 -1), <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
 
@@ -907,10 +801,8 @@ define <4 x float> @vfmsac_vv_v4f32_ta(<4 x float> %a, <4 x float> %b, <4 x floa
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -924,10 +816,8 @@ define <4 x float> @vfmsac_vf_v4f32_ta(<4 x float> %a, float %b, <4 x float> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %vb, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a, <4 x float> %vb, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -941,10 +831,8 @@ define <4 x float> @vfmsac_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %a, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %a, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -961,10 +849,8 @@ define <8 x float> @vfmsac_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -976,11 +862,9 @@ define <8 x float> @vfmsac_vv_v8f32_unmasked(<8 x float> %a, <8 x float> %b, <8
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %allones, <8 x float> %v, <8 x float> %c, i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> splat (i1 -1), <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
 
@@ -993,10 +877,8 @@ define <8 x float> @vfmsac_vf_v8f32(<8 x float> %a, float %b, <8 x float> %c, <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %vb, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %vb, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1010,10 +892,8 @@ define <8 x float> @vfmsac_vf_v8f32_commute(<8 x float> %a, float %b, <8 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %a, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %a, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1027,11 +907,9 @@ define <8 x float> @vfmsac_vf_v8f32_unmasked(<8 x float> %a, float %b, <8 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %vb, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %allones, <8 x float> %v, <8 x float> %c, i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %vb, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> splat (i1 -1), <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
 
@@ -1042,10 +920,8 @@ define <8 x float> @vfmsac_vv_v8f32_ta(<8 x float> %a, <8 x float> %b, <8 x floa
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1059,10 +935,8 @@ define <8 x float> @vfmsac_vf_v8f32_ta(<8 x float> %a, float %b, <8 x float> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %vb, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %a, <8 x float> %vb, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1076,10 +950,8 @@ define <8 x float> @vfmsac_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %a, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %a, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1096,10 +968,8 @@ define <16 x float> @vfmsac_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x fl
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1111,11 +981,9 @@ define <16 x float> @vfmsac_vv_v16f32_unmasked(<16 x float> %a, <16 x float> %b,
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %allones, <16 x float> %v, <16 x float> %c, i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> splat (i1 -1), <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
 
@@ -1128,10 +996,8 @@ define <16 x float> @vfmsac_vf_v16f32(<16 x float> %a, float %b, <16 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %vb, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %vb, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1145,10 +1011,8 @@ define <16 x float> @vfmsac_vf_v16f32_commute(<16 x float> %a, float %b, <16 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %a, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %a, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1162,11 +1026,9 @@ define <16 x float> @vfmsac_vf_v16f32_unmasked(<16 x float> %a, float %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %vb, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %allones, <16 x float> %v, <16 x float> %c, i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %vb, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> splat (i1 -1), <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
 
@@ -1177,10 +1039,8 @@ define <16 x float> @vfmsac_vv_v16f32_ta(<16 x float> %a, <16 x float> %b, <16 x
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1194,10 +1054,8 @@ define <16 x float> @vfmsac_vf_v16f32_ta(<16 x float> %a, float %b, <16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %vb, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %a, <16 x float> %vb, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1211,10 +1069,8 @@ define <16 x float> @vfmsac_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %a, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %a, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1231,10 +1087,8 @@ define <2 x double> @vfmsac_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x doub
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1246,11 +1100,9 @@ define <2 x double> @vfmsac_vv_v2f64_unmasked(<2 x double> %a, <2 x double> %b,
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %allones, <2 x double> %v, <2 x double> %c, i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> splat (i1 -1), <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
 
@@ -1263,10 +1115,8 @@ define <2 x double> @vfmsac_vf_v2f64(<2 x double> %a, double %b, <2 x double> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %vb, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %vb, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1280,10 +1130,8 @@ define <2 x double> @vfmsac_vf_v2f64_commute(<2 x double> %a, double %b, <2 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %a, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %a, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1297,11 +1145,9 @@ define <2 x double> @vfmsac_vf_v2f64_unmasked(<2 x double> %a, double %b, <2 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %vb, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %allones, <2 x double> %v, <2 x double> %c, i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %vb, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> splat (i1 -1), <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
 
@@ -1312,10 +1158,8 @@ define <2 x double> @vfmsac_vv_v2f64_ta(<2 x double> %a, <2 x double> %b, <2 x d
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1329,10 +1173,8 @@ define <2 x double> @vfmsac_vf_v2f64_ta(<2 x double> %a, double %b, <2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %vb, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %a, <2 x double> %vb, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1346,10 +1188,8 @@ define <2 x double> @vfmsac_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %a, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %a, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1366,10 +1206,8 @@ define <4 x double> @vfmsac_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x doub
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1381,11 +1219,9 @@ define <4 x double> @vfmsac_vv_v4f64_unmasked(<4 x double> %a, <4 x double> %b,
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %allones, <4 x double> %v, <4 x double> %c, i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> splat (i1 -1), <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
 
@@ -1398,10 +1234,8 @@ define <4 x double> @vfmsac_vf_v4f64(<4 x double> %a, double %b, <4 x double> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %vb, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %vb, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1415,10 +1249,8 @@ define <4 x double> @vfmsac_vf_v4f64_commute(<4 x double> %a, double %b, <4 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %a, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %a, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1432,11 +1264,9 @@ define <4 x double> @vfmsac_vf_v4f64_unmasked(<4 x double> %a, double %b, <4 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %vb, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %allones, <4 x double> %v, <4 x double> %c, i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %vb, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> splat (i1 -1), <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
 
@@ -1447,10 +1277,8 @@ define <4 x double> @vfmsac_vv_v4f64_ta(<4 x double> %a, <4 x double> %b, <4 x d
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1464,10 +1292,8 @@ define <4 x double> @vfmsac_vf_v4f64_ta(<4 x double> %a, double %b, <4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %vb, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %a, <4 x double> %vb, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1481,10 +1307,8 @@ define <4 x double> @vfmsac_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %a, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %a, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1501,10 +1325,8 @@ define <8 x double> @vfmsac_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x doub
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1516,11 +1338,9 @@ define <8 x double> @vfmsac_vv_v8f64_unmasked(<8 x double> %a, <8 x double> %b,
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %allones, <8 x double> %v, <8 x double> %c, i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> splat (i1 -1), <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
 
@@ -1533,10 +1353,8 @@ define <8 x double> @vfmsac_vf_v8f64(<8 x double> %a, double %b, <8 x double> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %vb, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %vb, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1550,10 +1368,8 @@ define <8 x double> @vfmsac_vf_v8f64_commute(<8 x double> %a, double %b, <8 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %a, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %a, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1567,11 +1383,9 @@ define <8 x double> @vfmsac_vf_v8f64_unmasked(<8 x double> %a, double %b, <8 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %vb, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %allones, <8 x double> %v, <8 x double> %c, i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %vb, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> splat (i1 -1), <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
 
@@ -1582,10 +1396,8 @@ define <8 x double> @vfmsac_vv_v8f64_ta(<8 x double> %a, <8 x double> %b, <8 x d
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1599,10 +1411,8 @@ define <8 x double> @vfmsac_vf_v8f64_ta(<8 x double> %a, double %b, <8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %vb, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %a, <8 x double> %vb, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1616,10 +1426,8 @@ define <8 x double> @vfmsac_vf_v8f64_commute_ta(<8 x double> %a, double %b, <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %a, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %a, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
index 4e06263f4e8b70..64ce0a12de8cfb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
@@ -48,9 +48,7 @@ define <2 x half> @vfmul_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -106,9 +104,7 @@ define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -175,9 +171,7 @@ define <4 x half> @vfmul_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -233,9 +227,7 @@ define <4 x half> @vfmul_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -279,9 +271,7 @@ define <8 x half> @vfmul_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -337,9 +327,7 @@ define <8 x half> @vfmul_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -383,9 +371,7 @@ define <16 x half> @vfmul_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -441,9 +427,7 @@ define <16 x half> @vfmul_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -465,9 +449,7 @@ define <2 x float> @vfmul_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -491,9 +473,7 @@ define <2 x float> @vfmul_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -515,9 +495,7 @@ define <4 x float> @vfmul_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -541,9 +519,7 @@ define <4 x float> @vfmul_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -565,9 +541,7 @@ define <8 x float> @vfmul_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -591,9 +565,7 @@ define <8 x float> @vfmul_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -615,9 +587,7 @@ define <16 x float> @vfmul_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -641,9 +611,7 @@ define <16 x float> @vfmul_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -665,9 +633,7 @@ define <2 x double> @vfmul_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -691,9 +657,7 @@ define <2 x double> @vfmul_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -715,9 +679,7 @@ define <4 x double> @vfmul_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -741,9 +703,7 @@ define <4 x double> @vfmul_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -765,9 +725,7 @@ define <8 x double> @vfmul_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -791,9 +749,7 @@ define <8 x double> @vfmul_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -815,9 +771,7 @@ define <16 x double> @vfmul_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -841,8 +795,6 @@ define <16 x double> @vfmul_vf_v16f64_unmasked(<16 x double> %va, double %b, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
index 4af566cb5f55ee..288efb0f1fc27c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
@@ -23,9 +23,7 @@ define <2 x half> @vfma_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, <2 x ha
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -49,9 +47,7 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -74,9 +70,7 @@ define <4 x half> @vfma_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, <4 x ha
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -100,9 +94,7 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -125,9 +117,7 @@ define <8 x half> @vfma_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, <8 x ha
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -151,9 +141,7 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -176,9 +164,7 @@ define <16 x half> @vfma_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, <16
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -202,9 +188,7 @@ define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -227,9 +211,7 @@ define <2 x float> @vfma_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, <2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -253,9 +235,7 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -278,9 +258,7 @@ define <4 x float> @vfma_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, <4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -304,9 +282,7 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -329,9 +305,7 @@ define <8 x float> @vfma_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, <8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -355,9 +329,7 @@ define <8 x float> @vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -380,9 +352,7 @@ define <16 x float> @vfma_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -406,9 +376,7 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -431,9 +399,7 @@ define <2 x double> @vfma_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -457,9 +423,7 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -482,9 +446,7 @@ define <4 x double> @vfma_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -508,9 +470,7 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -533,9 +493,7 @@ define <8 x double> @vfma_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -559,9 +517,7 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -588,9 +544,7 @@ define <15 x double> @vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.fmuladd.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.fmuladd.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -617,9 +571,7 @@ define <16 x double> @vfma_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -643,9 +595,7 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x double> %vc, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x double> %vc, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -796,8 +746,6 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
index 3d037a5589a1d1..c36ec25c04f93f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -46,9 +46,7 @@ define <2 x half> @vfneg_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -90,9 +88,7 @@ define <4 x half> @vfneg_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -134,9 +130,7 @@ define <8 x half> @vfneg_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -178,9 +172,7 @@ define <16 x half> @vfneg_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -202,9 +194,7 @@ define <2 x float> @vfneg_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -226,9 +216,7 @@ define <4 x float> @vfneg_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -250,9 +238,7 @@ define <8 x float> @vfneg_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -274,9 +260,7 @@ define <16 x float> @vfneg_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -298,9 +282,7 @@ define <2 x double> @vfneg_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -322,9 +304,7 @@ define <4 x double> @vfneg_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -346,9 +326,7 @@ define <8 x double> @vfneg_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -370,9 +348,7 @@ define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -394,9 +370,7 @@ define <16 x double> @vfneg_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -445,8 +419,6 @@ define <32 x double> @vfneg_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v16, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll
index 7dc92106665491..6d65ab4083f7ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll
@@ -16,11 +16,9 @@ define <2 x half> @vfnmacc_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c,
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -32,12 +30,10 @@ define <2 x half> @vfnmacc_vv_v2f16_unmasked(<2 x half> %a, <2 x half> %b, <2 x
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %allones, <2 x half> %v, <2 x half> %c, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> splat (i1 -1), <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
 
@@ -50,11 +46,9 @@ define <2 x half> @vfnmacc_vf_v2f16(<2 x half> %a, half %b, <2 x half> %c, <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -68,11 +62,9 @@ define <2 x half> @vfnmacc_vf_v2f16_commute(<2 x half> %a, half %b, <2 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %nega, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %nega, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -86,12 +78,10 @@ define <2 x half> @vfnmacc_vf_v2f16_unmasked(<2 x half> %a, half %b, <2 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %allones, <2 x half> %v, <2 x half> %c, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> splat (i1 -1), <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
 
@@ -102,11 +92,9 @@ define <2 x half> @vfnmacc_vv_v2f16_ta(<2 x half> %a, <2 x half> %b, <2 x half>
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -120,11 +108,9 @@ define <2 x half> @vfnmacc_vf_v2f16_ta(<2 x half> %a, half %b, <2 x half> %c, <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -138,11 +124,9 @@ define <2 x half> @vfnmacc_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %nega, <2 x half> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %nega, <2 x half> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -159,11 +143,9 @@ define <4 x half> @vfnmacc_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c,
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -175,12 +157,10 @@ define <4 x half> @vfnmacc_vv_v4f16_unmasked(<4 x half> %a, <4 x half> %b, <4 x
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %allones, <4 x half> %v, <4 x half> %c, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> splat (i1 -1), <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
 
@@ -193,11 +173,9 @@ define <4 x half> @vfnmacc_vf_v4f16(<4 x half> %a, half %b, <4 x half> %c, <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -211,11 +189,9 @@ define <4 x half> @vfnmacc_vf_v4f16_commute(<4 x half> %a, half %b, <4 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %nega, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %nega, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -229,12 +205,10 @@ define <4 x half> @vfnmacc_vf_v4f16_unmasked(<4 x half> %a, half %b, <4 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %allones, <4 x half> %v, <4 x half> %c, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> splat (i1 -1), <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
 
@@ -245,11 +219,9 @@ define <4 x half> @vfnmacc_vv_v4f16_ta(<4 x half> %a, <4 x half> %b, <4 x half>
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -263,11 +235,9 @@ define <4 x half> @vfnmacc_vf_v4f16_ta(<4 x half> %a, half %b, <4 x half> %c, <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -281,11 +251,9 @@ define <4 x half> @vfnmacc_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %nega, <4 x half> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %nega, <4 x half> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -302,11 +270,9 @@ define <8 x half> @vfnmacc_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c,
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -318,12 +284,10 @@ define <8 x half> @vfnmacc_vv_v8f16_unmasked(<8 x half> %a, <8 x half> %b, <8 x
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %allones, <8 x half> %v, <8 x half> %c, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> splat (i1 -1), <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
 
@@ -336,11 +300,9 @@ define <8 x half> @vfnmacc_vf_v8f16(<8 x half> %a, half %b, <8 x half> %c, <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -354,11 +316,9 @@ define <8 x half> @vfnmacc_vf_v8f16_commute(<8 x half> %a, half %b, <8 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %nega, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %nega, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -372,12 +332,10 @@ define <8 x half> @vfnmacc_vf_v8f16_unmasked(<8 x half> %a, half %b, <8 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %allones, <8 x half> %v, <8 x half> %c, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> splat (i1 -1), <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
 
@@ -388,11 +346,9 @@ define <8 x half> @vfnmacc_vv_v8f16_ta(<8 x half> %a, <8 x half> %b, <8 x half>
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -406,11 +362,9 @@ define <8 x half> @vfnmacc_vf_v8f16_ta(<8 x half> %a, half %b, <8 x half> %c, <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -424,11 +378,9 @@ define <8 x half> @vfnmacc_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %nega, <8 x half> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %nega, <8 x half> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -445,11 +397,9 @@ define <16 x half> @vfnmacc_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -461,12 +411,10 @@ define <16 x half> @vfnmacc_vv_v16f16_unmasked(<16 x half> %a, <16 x half> %b, <
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %allones, <16 x half> %v, <16 x half> %c, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> splat (i1 -1), <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
 
@@ -479,11 +427,9 @@ define <16 x half> @vfnmacc_vf_v16f16(<16 x half> %a, half %b, <16 x half> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -497,11 +443,9 @@ define <16 x half> @vfnmacc_vf_v16f16_commute(<16 x half> %a, half %b, <16 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %nega, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %nega, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -515,12 +459,10 @@ define <16 x half> @vfnmacc_vf_v16f16_unmasked(<16 x half> %a, half %b, <16 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %allones, <16 x half> %v, <16 x half> %c, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> splat (i1 -1), <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
 
@@ -531,11 +473,9 @@ define <16 x half> @vfnmacc_vv_v16f16_ta(<16 x half> %a, <16 x half> %b, <16 x h
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -549,11 +489,9 @@ define <16 x half> @vfnmacc_vf_v16f16_ta(<16 x half> %a, half %b, <16 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -567,11 +505,9 @@ define <16 x half> @vfnmacc_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %nega, <16 x half> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %nega, <16 x half> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -588,11 +524,9 @@ define <32 x half> @vfnmacc_vv_v32f16(<32 x half> %a, <32 x half> %b, <32 x half
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %b, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %b, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -604,12 +538,10 @@ define <32 x half> @vfnmacc_vv_v32f16_unmasked(<32 x half> %a, <32 x half> %b, <
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %b, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %allones, <32 x half> %v, <32 x half> %c, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %b, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> splat (i1 -1), <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
 
@@ -622,11 +554,9 @@ define <32 x half> @vfnmacc_vf_v32f16(<32 x half> %a, half %b, <32 x half> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -640,11 +570,9 @@ define <32 x half> @vfnmacc_vf_v32f16_commute(<32 x half> %a, half %b, <32 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %nega, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %nega, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -658,12 +586,10 @@ define <32 x half> @vfnmacc_vf_v32f16_unmasked(<32 x half> %a, half %b, <32 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> %allones, <32 x half> %v, <32 x half> %c, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x half> @llvm.vp.merge.v32f16(<32 x i1> splat (i1 -1), <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
 
@@ -674,11 +600,9 @@ define <32 x half> @vfnmacc_vv_v32f16_ta(<32 x half> %a, <32 x half> %b, <32 x h
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %b, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %b, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -692,11 +616,9 @@ define <32 x half> @vfnmacc_vf_v32f16_ta(<32 x half> %a, half %b, <32 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -710,11 +632,9 @@ define <32 x half> @vfnmacc_vf_v32f16_commute_ta(<32 x half> %a, half %b, <32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %nega, <32 x half> %negc, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <32 x half> @llvm.vp.fneg.v32f16(<32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v32f16(<32 x half> %vb, <32 x half> %nega, <32 x half> %negc, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v32f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -731,11 +651,9 @@ define <2 x float> @vfnmacc_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float>
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -747,12 +665,10 @@ define <2 x float> @vfnmacc_vv_v2f32_unmasked(<2 x float> %a, <2 x float> %b, <2
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %allones, <2 x float> %v, <2 x float> %c, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> splat (i1 -1), <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
 
@@ -765,11 +681,9 @@ define <2 x float> @vfnmacc_vf_v2f32(<2 x float> %a, float %b, <2 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -783,11 +697,9 @@ define <2 x float> @vfnmacc_vf_v2f32_commute(<2 x float> %a, float %b, <2 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %nega, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %nega, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -801,12 +713,10 @@ define <2 x float> @vfnmacc_vf_v2f32_unmasked(<2 x float> %a, float %b, <2 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %allones, <2 x float> %v, <2 x float> %c, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> splat (i1 -1), <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
 
@@ -817,11 +727,9 @@ define <2 x float> @vfnmacc_vv_v2f32_ta(<2 x float> %a, <2 x float> %b, <2 x flo
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -835,11 +743,9 @@ define <2 x float> @vfnmacc_vf_v2f32_ta(<2 x float> %a, float %b, <2 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -853,11 +759,9 @@ define <2 x float> @vfnmacc_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %nega, <2 x float> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %nega, <2 x float> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -874,11 +778,9 @@ define <4 x float> @vfnmacc_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -890,12 +792,10 @@ define <4 x float> @vfnmacc_vv_v4f32_unmasked(<4 x float> %a, <4 x float> %b, <4
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %allones, <4 x float> %v, <4 x float> %c, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> splat (i1 -1), <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
 
@@ -908,11 +808,9 @@ define <4 x float> @vfnmacc_vf_v4f32(<4 x float> %a, float %b, <4 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -926,11 +824,9 @@ define <4 x float> @vfnmacc_vf_v4f32_commute(<4 x float> %a, float %b, <4 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %nega, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %nega, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -944,12 +840,10 @@ define <4 x float> @vfnmacc_vf_v4f32_unmasked(<4 x float> %a, float %b, <4 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %allones, <4 x float> %v, <4 x float> %c, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> splat (i1 -1), <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
 
@@ -960,11 +854,9 @@ define <4 x float> @vfnmacc_vv_v4f32_ta(<4 x float> %a, <4 x float> %b, <4 x flo
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -978,11 +870,9 @@ define <4 x float> @vfnmacc_vf_v4f32_ta(<4 x float> %a, float %b, <4 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -996,11 +886,9 @@ define <4 x float> @vfnmacc_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %nega, <4 x float> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %nega, <4 x float> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -1017,11 +905,9 @@ define <8 x float> @vfnmacc_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1033,12 +919,10 @@ define <8 x float> @vfnmacc_vv_v8f32_unmasked(<8 x float> %a, <8 x float> %b, <8
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %allones, <8 x float> %v, <8 x float> %c, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> splat (i1 -1), <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
 
@@ -1051,11 +935,9 @@ define <8 x float> @vfnmacc_vf_v8f32(<8 x float> %a, float %b, <8 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1069,11 +951,9 @@ define <8 x float> @vfnmacc_vf_v8f32_commute(<8 x float> %a, float %b, <8 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %nega, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %nega, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1087,12 +967,10 @@ define <8 x float> @vfnmacc_vf_v8f32_unmasked(<8 x float> %a, float %b, <8 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %allones, <8 x float> %v, <8 x float> %c, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> splat (i1 -1), <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
 
@@ -1103,11 +981,9 @@ define <8 x float> @vfnmacc_vv_v8f32_ta(<8 x float> %a, <8 x float> %b, <8 x flo
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1121,11 +997,9 @@ define <8 x float> @vfnmacc_vf_v8f32_ta(<8 x float> %a, float %b, <8 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1139,11 +1013,9 @@ define <8 x float> @vfnmacc_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %nega, <8 x float> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %nega, <8 x float> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1160,11 +1032,9 @@ define <16 x float> @vfnmacc_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x f
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1176,12 +1046,10 @@ define <16 x float> @vfnmacc_vv_v16f32_unmasked(<16 x float> %a, <16 x float> %b
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %allones, <16 x float> %v, <16 x float> %c, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> splat (i1 -1), <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
 
@@ -1194,11 +1062,9 @@ define <16 x float> @vfnmacc_vf_v16f32(<16 x float> %a, float %b, <16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1212,11 +1078,9 @@ define <16 x float> @vfnmacc_vf_v16f32_commute(<16 x float> %a, float %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %nega, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %nega, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1230,12 +1094,10 @@ define <16 x float> @vfnmacc_vf_v16f32_unmasked(<16 x float> %a, float %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %allones, <16 x float> %v, <16 x float> %c, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> splat (i1 -1), <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
 
@@ -1246,11 +1108,9 @@ define <16 x float> @vfnmacc_vv_v16f32_ta(<16 x float> %a, <16 x float> %b, <16
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1264,11 +1124,9 @@ define <16 x float> @vfnmacc_vf_v16f32_ta(<16 x float> %a, float %b, <16 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1282,11 +1140,9 @@ define <16 x float> @vfnmacc_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %nega, <16 x float> %negc, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %nega, <16 x float> %negc, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1303,11 +1159,9 @@ define <2 x double> @vfnmacc_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x dou
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1319,12 +1173,10 @@ define <2 x double> @vfnmacc_vv_v2f64_unmasked(<2 x double> %a, <2 x double> %b,
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %allones, <2 x double> %v, <2 x double> %c, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> splat (i1 -1), <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
 
@@ -1337,11 +1189,9 @@ define <2 x double> @vfnmacc_vf_v2f64(<2 x double> %a, double %b, <2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1355,11 +1205,9 @@ define <2 x double> @vfnmacc_vf_v2f64_commute(<2 x double> %a, double %b, <2 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %nega, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %nega, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1373,12 +1221,10 @@ define <2 x double> @vfnmacc_vf_v2f64_unmasked(<2 x double> %a, double %b, <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %allones, <2 x double> %v, <2 x double> %c, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> splat (i1 -1), <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
 
@@ -1389,11 +1235,9 @@ define <2 x double> @vfnmacc_vv_v2f64_ta(<2 x double> %a, <2 x double> %b, <2 x
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1407,11 +1251,9 @@ define <2 x double> @vfnmacc_vf_v2f64_ta(<2 x double> %a, double %b, <2 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1425,11 +1267,9 @@ define <2 x double> @vfnmacc_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %nega, <2 x double> %negc, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %nega, <2 x double> %negc, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1446,11 +1286,9 @@ define <4 x double> @vfnmacc_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x dou
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1462,12 +1300,10 @@ define <4 x double> @vfnmacc_vv_v4f64_unmasked(<4 x double> %a, <4 x double> %b,
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %allones, <4 x double> %v, <4 x double> %c, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> splat (i1 -1), <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
 
@@ -1480,11 +1316,9 @@ define <4 x double> @vfnmacc_vf_v4f64(<4 x double> %a, double %b, <4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1498,11 +1332,9 @@ define <4 x double> @vfnmacc_vf_v4f64_commute(<4 x double> %a, double %b, <4 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %nega, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %nega, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1516,12 +1348,10 @@ define <4 x double> @vfnmacc_vf_v4f64_unmasked(<4 x double> %a, double %b, <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %allones, <4 x double> %v, <4 x double> %c, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> splat (i1 -1), <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
 
@@ -1532,11 +1362,9 @@ define <4 x double> @vfnmacc_vv_v4f64_ta(<4 x double> %a, <4 x double> %b, <4 x
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1550,11 +1378,9 @@ define <4 x double> @vfnmacc_vf_v4f64_ta(<4 x double> %a, double %b, <4 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1568,11 +1394,9 @@ define <4 x double> @vfnmacc_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %nega, <4 x double> %negc, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %nega, <4 x double> %negc, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1589,11 +1413,9 @@ define <8 x double> @vfnmacc_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x dou
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1605,12 +1427,10 @@ define <8 x double> @vfnmacc_vv_v8f64_unmasked(<8 x double> %a, <8 x double> %b,
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %allones, <8 x double> %v, <8 x double> %c, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> splat (i1 -1), <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
 
@@ -1623,11 +1443,9 @@ define <8 x double> @vfnmacc_vf_v8f64(<8 x double> %a, double %b, <8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1641,11 +1459,9 @@ define <8 x double> @vfnmacc_vf_v8f64_commute(<8 x double> %a, double %b, <8 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %nega, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %nega, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1659,12 +1475,10 @@ define <8 x double> @vfnmacc_vf_v8f64_unmasked(<8 x double> %a, double %b, <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %allones, <8 x double> %v, <8 x double> %c, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> splat (i1 -1), <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
 
@@ -1675,11 +1489,9 @@ define <8 x double> @vfnmacc_vv_v8f64_ta(<8 x double> %a, <8 x double> %b, <8 x
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1693,11 +1505,9 @@ define <8 x double> @vfnmacc_vf_v8f64_ta(<8 x double> %a, double %b, <8 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1711,11 +1521,9 @@ define <8 x double> @vfnmacc_vf_v8f64_commute_ta(<8 x double> %a, double %b, <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %nega, <8 x double> %negc, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %nega, <8 x double> %negc, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll
index 86605446815bf0..df705270664bc5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll
@@ -16,10 +16,8 @@ define <2 x half> @vfnmsac_vv_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c,
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -31,11 +29,9 @@ define <2 x half> @vfnmsac_vv_v2f16_unmasked(<2 x half> %a, <2 x half> %b, <2 x
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %allones, <2 x half> %v, <2 x half> %c, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> splat (i1 -1), <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
 
@@ -48,10 +44,8 @@ define <2 x half> @vfnmsac_vf_v2f16(<2 x half> %a, half %b, <2 x half> %c, <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -65,10 +59,8 @@ define <2 x half> @vfnmsac_vf_v2f16_commute(<2 x half> %a, half %b, <2 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %nega, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %nega, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -82,11 +74,9 @@ define <2 x half> @vfnmsac_vf_v2f16_unmasked(<2 x half> %a, half %b, <2 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> %allones, <2 x half> %v, <2 x half> %c, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x half> @llvm.vp.merge.v2f16(<2 x i1> splat (i1 -1), <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
 
@@ -97,10 +87,8 @@ define <2 x half> @vfnmsac_vv_v2f16_ta(<2 x half> %a, <2 x half> %b, <2 x half>
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %b, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -114,10 +102,8 @@ define <2 x half> @vfnmsac_vf_v2f16_ta(<2 x half> %a, half %b, <2 x half> %c, <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %nega, <2 x half> %vb, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -131,10 +117,8 @@ define <2 x half> @vfnmsac_vf_v2f16_commute_ta(<2 x half> %a, half %b, <2 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %nega, <2 x half> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x half> @llvm.vp.fma.v2f16(<2 x half> %vb, <2 x half> %nega, <2 x half> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %m, <2 x half> %v, <2 x half> %c, i32 %evl)
   ret <2 x half> %u
 }
@@ -151,10 +135,8 @@ define <4 x half> @vfnmsac_vv_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c,
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -166,11 +148,9 @@ define <4 x half> @vfnmsac_vv_v4f16_unmasked(<4 x half> %a, <4 x half> %b, <4 x
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %allones, <4 x half> %v, <4 x half> %c, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> splat (i1 -1), <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
 
@@ -183,10 +163,8 @@ define <4 x half> @vfnmsac_vf_v4f16(<4 x half> %a, half %b, <4 x half> %c, <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -200,10 +178,8 @@ define <4 x half> @vfnmsac_vf_v4f16_commute(<4 x half> %a, half %b, <4 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %nega, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %nega, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -217,11 +193,9 @@ define <4 x half> @vfnmsac_vf_v4f16_unmasked(<4 x half> %a, half %b, <4 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> %allones, <4 x half> %v, <4 x half> %c, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x half> @llvm.vp.merge.v4f16(<4 x i1> splat (i1 -1), <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
 
@@ -232,10 +206,8 @@ define <4 x half> @vfnmsac_vv_v4f16_ta(<4 x half> %a, <4 x half> %b, <4 x half>
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %b, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -249,10 +221,8 @@ define <4 x half> @vfnmsac_vf_v4f16_ta(<4 x half> %a, half %b, <4 x half> %c, <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %nega, <4 x half> %vb, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -266,10 +236,8 @@ define <4 x half> @vfnmsac_vf_v4f16_commute_ta(<4 x half> %a, half %b, <4 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %nega, <4 x half> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x half> @llvm.vp.fma.v4f16(<4 x half> %vb, <4 x half> %nega, <4 x half> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %m, <4 x half> %v, <4 x half> %c, i32 %evl)
   ret <4 x half> %u
 }
@@ -286,10 +254,8 @@ define <8 x half> @vfnmsac_vv_v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c,
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -301,11 +267,9 @@ define <8 x half> @vfnmsac_vv_v8f16_unmasked(<8 x half> %a, <8 x half> %b, <8 x
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %allones, <8 x half> %v, <8 x half> %c, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> splat (i1 -1), <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
 
@@ -318,10 +282,8 @@ define <8 x half> @vfnmsac_vf_v8f16(<8 x half> %a, half %b, <8 x half> %c, <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -335,10 +297,8 @@ define <8 x half> @vfnmsac_vf_v8f16_commute(<8 x half> %a, half %b, <8 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %nega, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %nega, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -352,11 +312,9 @@ define <8 x half> @vfnmsac_vf_v8f16_unmasked(<8 x half> %a, half %b, <8 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> %allones, <8 x half> %v, <8 x half> %c, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x half> @llvm.vp.merge.v8f16(<8 x i1> splat (i1 -1), <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
 
@@ -367,10 +325,8 @@ define <8 x half> @vfnmsac_vv_v8f16_ta(<8 x half> %a, <8 x half> %b, <8 x half>
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %b, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -384,10 +340,8 @@ define <8 x half> @vfnmsac_vf_v8f16_ta(<8 x half> %a, half %b, <8 x half> %c, <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %nega, <8 x half> %vb, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -401,10 +355,8 @@ define <8 x half> @vfnmsac_vf_v8f16_commute_ta(<8 x half> %a, half %b, <8 x half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %nega, <8 x half> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %vb, <8 x half> %nega, <8 x half> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %m, <8 x half> %v, <8 x half> %c, i32 %evl)
   ret <8 x half> %u
 }
@@ -421,10 +373,8 @@ define <16 x half> @vfnmsac_vv_v16f16(<16 x half> %a, <16 x half> %b, <16 x half
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -436,11 +386,9 @@ define <16 x half> @vfnmsac_vv_v16f16_unmasked(<16 x half> %a, <16 x half> %b, <
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %allones, <16 x half> %v, <16 x half> %c, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> splat (i1 -1), <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
 
@@ -453,10 +401,8 @@ define <16 x half> @vfnmsac_vf_v16f16(<16 x half> %a, half %b, <16 x half> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -470,10 +416,8 @@ define <16 x half> @vfnmsac_vf_v16f16_commute(<16 x half> %a, half %b, <16 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %nega, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %nega, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -487,11 +431,9 @@ define <16 x half> @vfnmsac_vf_v16f16_unmasked(<16 x half> %a, half %b, <16 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> %allones, <16 x half> %v, <16 x half> %c, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x half> @llvm.vp.merge.v16f16(<16 x i1> splat (i1 -1), <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
 
@@ -502,10 +444,8 @@ define <16 x half> @vfnmsac_vv_v16f16_ta(<16 x half> %a, <16 x half> %b, <16 x h
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %b, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -519,10 +459,8 @@ define <16 x half> @vfnmsac_vf_v16f16_ta(<16 x half> %a, half %b, <16 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %nega, <16 x half> %vb, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -536,10 +474,8 @@ define <16 x half> @vfnmsac_vf_v16f16_commute_ta(<16 x half> %a, half %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %nega, <16 x half> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %vb, <16 x half> %nega, <16 x half> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %m, <16 x half> %v, <16 x half> %c, i32 %evl)
   ret <16 x half> %u
 }
@@ -556,10 +492,8 @@ define <32 x half> @vfnmsac_vv_v26f16(<32 x half> %a, <32 x half> %b, <32 x half
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %b, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %b, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v26f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -571,11 +505,9 @@ define <32 x half> @vfnmsac_vv_v26f16_unmasked(<32 x half> %a, <32 x half> %b, <
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %b, <32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x half> @llvm.vp.merge.v26f16(<32 x i1> %allones, <32 x half> %v, <32 x half> %c, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %b, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x half> @llvm.vp.merge.v26f16(<32 x i1> splat (i1 -1), <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
 
@@ -588,10 +520,8 @@ define <32 x half> @vfnmsac_vf_v26f16(<32 x half> %a, half %b, <32 x half> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v26f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -605,10 +535,8 @@ define <32 x half> @vfnmsac_vf_v26f16_commute(<32 x half> %a, half %b, <32 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %vb, <32 x half> %nega, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %vb, <32 x half> %nega, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.merge.v26f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -622,11 +550,9 @@ define <32 x half> @vfnmsac_vf_v26f16_unmasked(<32 x half> %a, half %b, <32 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %c, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x half> @llvm.vp.merge.v26f16(<32 x i1> %allones, <32 x half> %v, <32 x half> %c, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x half> @llvm.vp.merge.v26f16(<32 x i1> splat (i1 -1), <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
 
@@ -637,10 +563,8 @@ define <32 x half> @vfnmsac_vv_v26f16_ta(<32 x half> %a, <32 x half> %b, <32 x h
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %b, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %b, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v26f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -654,10 +578,8 @@ define <32 x half> @vfnmsac_vf_v26f16_ta(<32 x half> %a, half %b, <32 x half> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %nega, <32 x half> %vb, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v26f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -671,10 +593,8 @@ define <32 x half> @vfnmsac_vf_v26f16_commute_ta(<32 x half> %a, half %b, <32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x half> poison, half %b, i32 0
   %vb = shufflevector <32 x half> %elt.head, <32 x half> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> %allones, i32 %evl)
-  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %vb, <32 x half> %nega, <32 x half> %c, <32 x i1> %allones, i32 %evl)
+  %nega = call <32 x half> @llvm.vp.fneg.v26f16(<32 x half> %a, <32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <32 x half> @llvm.vp.fma.v26f16(<32 x half> %vb, <32 x half> %nega, <32 x half> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x half> @llvm.vp.select.v26f16(<32 x i1> %m, <32 x half> %v, <32 x half> %c, i32 %evl)
   ret <32 x half> %u
 }
@@ -691,10 +611,8 @@ define <2 x float> @vfnmsac_vv_v2f32(<2 x float> %a, <2 x float> %b, <2 x float>
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -706,11 +624,9 @@ define <2 x float> @vfnmsac_vv_v2f32_unmasked(<2 x float> %a, <2 x float> %b, <2
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %allones, <2 x float> %v, <2 x float> %c, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> splat (i1 -1), <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
 
@@ -723,10 +639,8 @@ define <2 x float> @vfnmsac_vf_v2f32(<2 x float> %a, float %b, <2 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -740,10 +654,8 @@ define <2 x float> @vfnmsac_vf_v2f32_commute(<2 x float> %a, float %b, <2 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %nega, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %nega, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -757,11 +669,9 @@ define <2 x float> @vfnmsac_vf_v2f32_unmasked(<2 x float> %a, float %b, <2 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> %allones, <2 x float> %v, <2 x float> %c, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x float> @llvm.vp.merge.v2f32(<2 x i1> splat (i1 -1), <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
 
@@ -772,10 +682,8 @@ define <2 x float> @vfnmsac_vv_v2f32_ta(<2 x float> %a, <2 x float> %b, <2 x flo
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %b, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -789,10 +697,8 @@ define <2 x float> @vfnmsac_vf_v2f32_ta(<2 x float> %a, float %b, <2 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %nega, <2 x float> %vb, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -806,10 +712,8 @@ define <2 x float> @vfnmsac_vf_v2f32_commute_ta(<2 x float> %a, float %b, <2 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %nega, <2 x float> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %vb, <2 x float> %nega, <2 x float> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %m, <2 x float> %v, <2 x float> %c, i32 %evl)
   ret <2 x float> %u
 }
@@ -826,10 +730,8 @@ define <4 x float> @vfnmsac_vv_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -841,11 +743,9 @@ define <4 x float> @vfnmsac_vv_v4f32_unmasked(<4 x float> %a, <4 x float> %b, <4
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %allones, <4 x float> %v, <4 x float> %c, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> splat (i1 -1), <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
 
@@ -858,10 +758,8 @@ define <4 x float> @vfnmsac_vf_v4f32(<4 x float> %a, float %b, <4 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -875,10 +773,8 @@ define <4 x float> @vfnmsac_vf_v4f32_commute(<4 x float> %a, float %b, <4 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %nega, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %nega, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -892,11 +788,9 @@ define <4 x float> @vfnmsac_vf_v4f32_unmasked(<4 x float> %a, float %b, <4 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> %allones, <4 x float> %v, <4 x float> %c, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x float> @llvm.vp.merge.v4f32(<4 x i1> splat (i1 -1), <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
 
@@ -907,10 +801,8 @@ define <4 x float> @vfnmsac_vv_v4f32_ta(<4 x float> %a, <4 x float> %b, <4 x flo
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %b, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -924,10 +816,8 @@ define <4 x float> @vfnmsac_vf_v4f32_ta(<4 x float> %a, float %b, <4 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %nega, <4 x float> %vb, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -941,10 +831,8 @@ define <4 x float> @vfnmsac_vf_v4f32_commute_ta(<4 x float> %a, float %b, <4 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %nega, <4 x float> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %vb, <4 x float> %nega, <4 x float> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %m, <4 x float> %v, <4 x float> %c, i32 %evl)
   ret <4 x float> %u
 }
@@ -961,10 +849,8 @@ define <8 x float> @vfnmsac_vv_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -976,11 +862,9 @@ define <8 x float> @vfnmsac_vv_v8f32_unmasked(<8 x float> %a, <8 x float> %b, <8
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %allones, <8 x float> %v, <8 x float> %c, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> splat (i1 -1), <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
 
@@ -993,10 +877,8 @@ define <8 x float> @vfnmsac_vf_v8f32(<8 x float> %a, float %b, <8 x float> %c, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1010,10 +892,8 @@ define <8 x float> @vfnmsac_vf_v8f32_commute(<8 x float> %a, float %b, <8 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %nega, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %nega, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1027,11 +907,9 @@ define <8 x float> @vfnmsac_vf_v8f32_unmasked(<8 x float> %a, float %b, <8 x flo
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %allones, <8 x float> %v, <8 x float> %c, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> splat (i1 -1), <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
 
@@ -1042,10 +920,8 @@ define <8 x float> @vfnmsac_vv_v8f32_ta(<8 x float> %a, <8 x float> %b, <8 x flo
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %b, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1059,10 +935,8 @@ define <8 x float> @vfnmsac_vf_v8f32_ta(<8 x float> %a, float %b, <8 x float> %c
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %nega, <8 x float> %vb, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1076,10 +950,8 @@ define <8 x float> @vfnmsac_vf_v8f32_commute_ta(<8 x float> %a, float %b, <8 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %nega, <8 x float> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %vb, <8 x float> %nega, <8 x float> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %v, <8 x float> %c, i32 %evl)
   ret <8 x float> %u
 }
@@ -1096,10 +968,8 @@ define <16 x float> @vfnmsac_vv_v16f32(<16 x float> %a, <16 x float> %b, <16 x f
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1111,11 +981,9 @@ define <16 x float> @vfnmsac_vv_v16f32_unmasked(<16 x float> %a, <16 x float> %b
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %allones, <16 x float> %v, <16 x float> %c, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> splat (i1 -1), <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
 
@@ -1128,10 +996,8 @@ define <16 x float> @vfnmsac_vf_v16f32(<16 x float> %a, float %b, <16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1145,10 +1011,8 @@ define <16 x float> @vfnmsac_vf_v16f32_commute(<16 x float> %a, float %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %nega, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %nega, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1162,11 +1026,9 @@ define <16 x float> @vfnmsac_vf_v16f32_unmasked(<16 x float> %a, float %b, <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> %allones, <16 x float> %v, <16 x float> %c, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x float> @llvm.vp.merge.v16f32(<16 x i1> splat (i1 -1), <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
 
@@ -1177,10 +1039,8 @@ define <16 x float> @vfnmsac_vv_v16f32_ta(<16 x float> %a, <16 x float> %b, <16
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %b, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1194,10 +1054,8 @@ define <16 x float> @vfnmsac_vf_v16f32_ta(<16 x float> %a, float %b, <16 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %nega, <16 x float> %vb, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1211,10 +1069,8 @@ define <16 x float> @vfnmsac_vf_v16f32_commute_ta(<16 x float> %a, float %b, <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> %allones, i32 %evl)
-  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %nega, <16 x float> %c, <16 x i1> %allones, i32 %evl)
+  %nega = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %a, <16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %vb, <16 x float> %nega, <16 x float> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %m, <16 x float> %v, <16 x float> %c, i32 %evl)
   ret <16 x float> %u
 }
@@ -1231,10 +1087,8 @@ define <2 x double> @vfnmsac_vv_v2f64(<2 x double> %a, <2 x double> %b, <2 x dou
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1246,11 +1100,9 @@ define <2 x double> @vfnmsac_vv_v2f64_unmasked(<2 x double> %a, <2 x double> %b,
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %allones, <2 x double> %v, <2 x double> %c, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> splat (i1 -1), <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
 
@@ -1263,10 +1115,8 @@ define <2 x double> @vfnmsac_vf_v2f64(<2 x double> %a, double %b, <2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1280,10 +1130,8 @@ define <2 x double> @vfnmsac_vf_v2f64_commute(<2 x double> %a, double %b, <2 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %nega, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %nega, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1297,11 +1145,9 @@ define <2 x double> @vfnmsac_vf_v2f64_unmasked(<2 x double> %a, double %b, <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> %allones, <2 x double> %v, <2 x double> %c, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x double> @llvm.vp.merge.v2f64(<2 x i1> splat (i1 -1), <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
 
@@ -1312,10 +1158,8 @@ define <2 x double> @vfnmsac_vv_v2f64_ta(<2 x double> %a, <2 x double> %b, <2 x
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %b, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1329,10 +1173,8 @@ define <2 x double> @vfnmsac_vf_v2f64_ta(<2 x double> %a, double %b, <2 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %nega, <2 x double> %vb, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1346,10 +1188,8 @@ define <2 x double> @vfnmsac_vf_v2f64_commute_ta(<2 x double> %a, double %b, <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> %allones, i32 %evl)
-  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %nega, <2 x double> %c, <2 x i1> %allones, i32 %evl)
+  %nega = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %a, <2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %vb, <2 x double> %nega, <2 x double> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %m, <2 x double> %v, <2 x double> %c, i32 %evl)
   ret <2 x double> %u
 }
@@ -1366,10 +1206,8 @@ define <4 x double> @vfnmsac_vv_v4f64(<4 x double> %a, <4 x double> %b, <4 x dou
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1381,11 +1219,9 @@ define <4 x double> @vfnmsac_vv_v4f64_unmasked(<4 x double> %a, <4 x double> %b,
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %allones, <4 x double> %v, <4 x double> %c, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> splat (i1 -1), <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
 
@@ -1398,10 +1234,8 @@ define <4 x double> @vfnmsac_vf_v4f64(<4 x double> %a, double %b, <4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1415,10 +1249,8 @@ define <4 x double> @vfnmsac_vf_v4f64_commute(<4 x double> %a, double %b, <4 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %nega, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %nega, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1432,11 +1264,9 @@ define <4 x double> @vfnmsac_vf_v4f64_unmasked(<4 x double> %a, double %b, <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> %allones, <4 x double> %v, <4 x double> %c, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x double> @llvm.vp.merge.v4f64(<4 x i1> splat (i1 -1), <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
 
@@ -1447,10 +1277,8 @@ define <4 x double> @vfnmsac_vv_v4f64_ta(<4 x double> %a, <4 x double> %b, <4 x
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %b, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1464,10 +1292,8 @@ define <4 x double> @vfnmsac_vf_v4f64_ta(<4 x double> %a, double %b, <4 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %nega, <4 x double> %vb, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1481,10 +1307,8 @@ define <4 x double> @vfnmsac_vf_v4f64_commute_ta(<4 x double> %a, double %b, <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> %allones, i32 %evl)
-  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %nega, <4 x double> %c, <4 x i1> %allones, i32 %evl)
+  %nega = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %a, <4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %vb, <4 x double> %nega, <4 x double> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %m, <4 x double> %v, <4 x double> %c, i32 %evl)
   ret <4 x double> %u
 }
@@ -1501,10 +1325,8 @@ define <8 x double> @vfnmsac_vv_v8f64(<8 x double> %a, <8 x double> %b, <8 x dou
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1516,11 +1338,9 @@ define <8 x double> @vfnmsac_vv_v8f64_unmasked(<8 x double> %a, <8 x double> %b,
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %allones, <8 x double> %v, <8 x double> %c, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> splat (i1 -1), <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
 
@@ -1533,10 +1353,8 @@ define <8 x double> @vfnmsac_vf_v8f64(<8 x double> %a, double %b, <8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1550,10 +1368,8 @@ define <8 x double> @vfnmsac_vf_v8f64_commute(<8 x double> %a, double %b, <8 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %nega, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %nega, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1567,11 +1383,9 @@ define <8 x double> @vfnmsac_vf_v8f64_unmasked(<8 x double> %a, double %b, <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %allones, <8 x double> %v, <8 x double> %c, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> splat (i1 -1), <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
 
@@ -1582,10 +1396,8 @@ define <8 x double> @vfnmsac_vv_v8f64_ta(<8 x double> %a, <8 x double> %b, <8 x
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %b, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1599,10 +1411,8 @@ define <8 x double> @vfnmsac_vf_v8f64_ta(<8 x double> %a, double %b, <8 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %nega, <8 x double> %vb, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
@@ -1616,10 +1426,8 @@ define <8 x double> @vfnmsac_vf_v8f64_commute_ta(<8 x double> %a, double %b, <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> %allones, i32 %evl)
-  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %nega, <8 x double> %c, <8 x i1> %allones, i32 %evl)
+  %nega = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %a, <8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %vb, <8 x double> %nega, <8 x double> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %v, <8 x double> %c, i32 %evl)
   ret <8 x double> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll
index db7de5907769fc..bd354b7dae8037 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll
@@ -26,9 +26,7 @@ define <2 x half> @vfrdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -54,9 +52,7 @@ define <4 x half> @vfrdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -82,9 +78,7 @@ define <8 x half> @vfrdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -110,9 +104,7 @@ define <16 x half> @vfrdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zero
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -138,9 +130,7 @@ define <2 x float> @vfrdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zero
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -166,9 +156,7 @@ define <4 x float> @vfrdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zero
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -194,9 +182,7 @@ define <8 x float> @vfrdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zero
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -222,9 +208,7 @@ define <16 x float> @vfrdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 z
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -250,9 +234,7 @@ define <2 x double> @vfrdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 z
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -278,9 +260,7 @@ define <4 x double> @vfrdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 z
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -306,9 +286,7 @@ define <8 x double> @vfrdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 z
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -334,8 +312,6 @@ define <16 x double> @vfrdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll
index 3391abf8740a78..0903ef8c8ec3fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll
@@ -26,9 +26,7 @@ define <2 x half> @vfrsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -54,9 +52,7 @@ define <4 x half> @vfrsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -82,9 +78,7 @@ define <8 x half> @vfrsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -110,9 +104,7 @@ define <16 x half> @vfrsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zero
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -138,9 +130,7 @@ define <2 x float> @vfrsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zero
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -166,9 +156,7 @@ define <4 x float> @vfrsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zero
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -194,9 +182,7 @@ define <8 x float> @vfrsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zero
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -222,9 +208,7 @@ define <16 x float> @vfrsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 z
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -250,9 +234,7 @@ define <2 x double> @vfrsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 z
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -278,9 +260,7 @@ define <4 x double> @vfrsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 z
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -306,9 +286,7 @@ define <8 x double> @vfrsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 z
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -334,8 +312,6 @@ define <16 x double> @vfrsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
index 60022644e5ab68..6004eb4fe217a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
@@ -46,9 +46,7 @@ define <2 x half> @vfsqrt_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.sqrt.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.sqrt.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -90,9 +88,7 @@ define <4 x half> @vfsqrt_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.sqrt.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.sqrt.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -134,9 +130,7 @@ define <8 x half> @vfsqrt_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.sqrt.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.sqrt.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -178,9 +172,7 @@ define <16 x half> @vfsqrt_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.sqrt.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.sqrt.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -202,9 +194,7 @@ define <2 x float> @vfsqrt_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.sqrt.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.sqrt.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -226,9 +216,7 @@ define <4 x float> @vfsqrt_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.sqrt.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.sqrt.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -250,9 +238,7 @@ define <8 x float> @vfsqrt_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.sqrt.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.sqrt.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -274,9 +260,7 @@ define <16 x float> @vfsqrt_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.sqrt.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.sqrt.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -298,9 +282,7 @@ define <2 x double> @vfsqrt_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.sqrt.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.sqrt.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -322,9 +304,7 @@ define <4 x double> @vfsqrt_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.sqrt.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.sqrt.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -346,9 +326,7 @@ define <8 x double> @vfsqrt_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.sqrt.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.sqrt.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -370,9 +348,7 @@ define <15 x double> @vfsqrt_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
-  %v = call <15 x double> @llvm.vp.sqrt.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
+  %v = call <15 x double> @llvm.vp.sqrt.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x double> %v
 }
 
@@ -394,9 +370,7 @@ define <16 x double> @vfsqrt_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.sqrt.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.sqrt.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -445,8 +419,6 @@ define <32 x double> @vfsqrt_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x double> @llvm.vp.sqrt.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
+  %v = call <32 x double> @llvm.vp.sqrt.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
index 76ca7d971eb49a..eb717a851ed467 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
@@ -48,9 +48,7 @@ define <2 x half> @vfsub_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -106,9 +104,7 @@ define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x half> poison, half %b, i32 0
   %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %v
 }
 
@@ -175,9 +171,7 @@ define <4 x half> @vfsub_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -233,9 +227,7 @@ define <4 x half> @vfsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x half> poison, half %b, i32 0
   %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -279,9 +271,7 @@ define <8 x half> @vfsub_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
 ; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -337,9 +327,7 @@ define <8 x half> @vfsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x half> %v
 }
 
@@ -383,9 +371,7 @@ define <16 x half> @vfsub_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
 ; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -441,9 +427,7 @@ define <16 x half> @vfsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroe
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x half> poison, half %b, i32 0
   %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x half> %v
 }
 
@@ -465,9 +449,7 @@ define <2 x float> @vfsub_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -491,9 +473,7 @@ define <2 x float> @vfsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x float> %v
 }
 
@@ -515,9 +495,7 @@ define <4 x float> @vfsub_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -541,9 +519,7 @@ define <4 x float> @vfsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -565,9 +541,7 @@ define <8 x float> @vfsub_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -591,9 +565,7 @@ define <8 x float> @vfsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %v
 }
 
@@ -615,9 +587,7 @@ define <16 x float> @vfsub_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -641,9 +611,7 @@ define <16 x float> @vfsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x float> %v
 }
 
@@ -665,9 +633,7 @@ define <2 x double> @vfsub_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -691,9 +657,7 @@ define <2 x double> @vfsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x double> %v
 }
 
@@ -715,9 +679,7 @@ define <4 x double> @vfsub_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -741,9 +703,7 @@ define <4 x double> @vfsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %v
 }
 
@@ -765,9 +725,7 @@ define <8 x double> @vfsub_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -791,9 +749,7 @@ define <8 x double> @vfsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x double> %v
 }
 
@@ -815,9 +771,7 @@ define <16 x double> @vfsub_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
 
@@ -841,8 +795,6 @@ define <16 x double> @vfsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll
index 792c6231aa4a57..2181fd8498f5af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmacc-vp.ll
@@ -16,10 +16,8 @@ define <2 x i8> @vmacc_vv_nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c,  <2 x i1
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> %m, <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
@@ -31,11 +29,9 @@ define <2 x i8> @vmacc_vv_nxv2i8_unmasked(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> %allones, <2 x i8> %y, <2 x i8> %c, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> splat (i1 -1), <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
 
@@ -48,10 +44,8 @@ define <2 x i8> @vmacc_vx_nxv2i8(<2 x i8> %a, i8 %b, <2 x i8> %c,  <2 x i1> %m,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> %m, <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
@@ -65,11 +59,9 @@ define <2 x i8> @vmacc_vx_nxv2i8_unmasked(<2 x i8> %a, i8 %b, <2 x i8> %c,  <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> %allones, <2 x i8> %y, <2 x i8> %c, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> splat (i1 -1), <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
 
@@ -80,10 +72,8 @@ define <2 x i8> @vmacc_vv_nxv2i8_ta(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c,  <2 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i8> @llvm.vp.select.nxv2i8(<2 x i1> %m, <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
@@ -97,10 +87,8 @@ define <2 x i8> @vmacc_vx_nxv2i8_ta(<2 x i8> %a, i8 %b, <2 x i8> %c,  <2 x i1> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.add.nxv2i8(<2 x i8> %x, <2 x i8> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i8> @llvm.vp.select.nxv2i8(<2 x i1> %m, <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
@@ -117,10 +105,8 @@ define <4 x i8> @vmacc_vv_nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c,  <4 x i1
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> %m, <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
@@ -132,11 +118,9 @@ define <4 x i8> @vmacc_vv_nxv4i8_unmasked(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> %allones, <4 x i8> %y, <4 x i8> %c, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> splat (i1 -1), <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
 
@@ -149,10 +133,8 @@ define <4 x i8> @vmacc_vx_nxv4i8(<4 x i8> %a, i8 %b, <4 x i8> %c,  <4 x i1> %m,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> %m, <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
@@ -166,11 +148,9 @@ define <4 x i8> @vmacc_vx_nxv4i8_unmasked(<4 x i8> %a, i8 %b, <4 x i8> %c,  <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> %allones, <4 x i8> %y, <4 x i8> %c, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> splat (i1 -1), <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
 
@@ -181,10 +161,8 @@ define <4 x i8> @vmacc_vv_nxv4i8_ta(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c,  <4 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i8> @llvm.vp.select.nxv4i8(<4 x i1> %m, <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
@@ -198,10 +176,8 @@ define <4 x i8> @vmacc_vx_nxv4i8_ta(<4 x i8> %a, i8 %b, <4 x i8> %c,  <4 x i1> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.add.nxv4i8(<4 x i8> %x, <4 x i8> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i8> @llvm.vp.select.nxv4i8(<4 x i1> %m, <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
@@ -218,10 +194,8 @@ define <8 x i8> @vmacc_vv_nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c,  <8 x i1
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> %m, <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
@@ -233,11 +207,9 @@ define <8 x i8> @vmacc_vv_nxv8i8_unmasked(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> %allones, <8 x i8> %y, <8 x i8> %c, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> splat (i1 -1), <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
 
@@ -250,10 +222,8 @@ define <8 x i8> @vmacc_vx_nxv8i8(<8 x i8> %a, i8 %b, <8 x i8> %c,  <8 x i1> %m,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> %m, <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
@@ -267,11 +237,9 @@ define <8 x i8> @vmacc_vx_nxv8i8_unmasked(<8 x i8> %a, i8 %b, <8 x i8> %c,  <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> %allones, <8 x i8> %y, <8 x i8> %c, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> splat (i1 -1), <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
 
@@ -282,10 +250,8 @@ define <8 x i8> @vmacc_vv_nxv8i8_ta(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c,  <8 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i8> @llvm.vp.select.nxv8i8(<8 x i1> %m, <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
@@ -299,10 +265,8 @@ define <8 x i8> @vmacc_vx_nxv8i8_ta(<8 x i8> %a, i8 %b, <8 x i8> %c,  <8 x i1> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.add.nxv8i8(<8 x i8> %x, <8 x i8> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i8> @llvm.vp.select.nxv8i8(<8 x i1> %m, <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
@@ -319,10 +283,8 @@ define <16 x i8> @vmacc_vv_nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c,  <1
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> %m, <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
@@ -334,11 +296,9 @@ define <16 x i8> @vmacc_vv_nxv16i8_unmasked(<16 x i8> %a, <16 x i8> %b, <16 x i8
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> %allones, <16 x i8> %y, <16 x i8> %c, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> splat (i1 -1), <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
 
@@ -351,10 +311,8 @@ define <16 x i8> @vmacc_vx_nxv16i8(<16 x i8> %a, i8 %b, <16 x i8> %c,  <16 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> %m, <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
@@ -368,11 +326,9 @@ define <16 x i8> @vmacc_vx_nxv16i8_unmasked(<16 x i8> %a, i8 %b, <16 x i8> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> %allones, <16 x i8> %y, <16 x i8> %c, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> splat (i1 -1), <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
 
@@ -383,10 +339,8 @@ define <16 x i8> @vmacc_vv_nxv16i8_ta(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i8> @llvm.vp.select.nxv16i8(<16 x i1> %m, <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
@@ -400,10 +354,8 @@ define <16 x i8> @vmacc_vx_nxv16i8_ta(<16 x i8> %a, i8 %b, <16 x i8> %c,  <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.add.nxv16i8(<16 x i8> %x, <16 x i8> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i8> @llvm.vp.select.nxv16i8(<16 x i1> %m, <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
@@ -420,10 +372,8 @@ define <32 x i8> @vmacc_vv_nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c,  <3
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> %m, <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
@@ -435,11 +385,9 @@ define <32 x i8> @vmacc_vv_nxv32i8_unmasked(<32 x i8> %a, <32 x i8> %b, <32 x i8
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> %allones, <32 x i8> %y, <32 x i8> %c, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> splat (i1 -1), <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
 
@@ -452,10 +400,8 @@ define <32 x i8> @vmacc_vx_nxv32i8(<32 x i8> %a, i8 %b, <32 x i8> %c,  <32 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <32 x i8> %elt.head, <32 x i8> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> %m, <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
@@ -469,11 +415,9 @@ define <32 x i8> @vmacc_vx_nxv32i8_unmasked(<32 x i8> %a, i8 %b, <32 x i8> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <32 x i8> %elt.head, <32 x i8> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> %allones, <32 x i8> %y, <32 x i8> %c, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> splat (i1 -1), <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
 
@@ -484,10 +428,8 @@ define <32 x i8> @vmacc_vv_nxv32i8_ta(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c,
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i8> @llvm.vp.select.nxv32i8(<32 x i1> %m, <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
@@ -501,10 +443,8 @@ define <32 x i8> @vmacc_vx_nxv32i8_ta(<32 x i8> %a, i8 %b, <32 x i8> %c,  <32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <32 x i8> %elt.head, <32 x i8> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.add.nxv32i8(<32 x i8> %x, <32 x i8> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i8> @llvm.vp.select.nxv32i8(<32 x i1> %m, <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
@@ -521,10 +461,8 @@ define <64 x i8> @vmacc_vv_nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c,  <6
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> %allones, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> splat (i1 -1), i32 %evl)
   %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> %m, <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
@@ -536,11 +474,9 @@ define <64 x i8> @vmacc_vv_nxv64i8_unmasked(<64 x i8> %a, <64 x i8> %b, <64 x i8
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> %allones, i32 %evl)
-  %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> %allones, <64 x i8> %y, <64 x i8> %c, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> splat (i1 -1), <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
 
@@ -553,10 +489,8 @@ define <64 x i8> @vmacc_vx_nxv64i8(<64 x i8> %a, i8 %b, <64 x i8> %c,  <64 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <64 x i8> %elt.head, <64 x i8> poison, <64 x i32> zeroinitializer
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> %allones, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> splat (i1 -1), i32 %evl)
   %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> %m, <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
@@ -570,11 +504,9 @@ define <64 x i8> @vmacc_vx_nxv64i8_unmasked(<64 x i8> %a, i8 %b, <64 x i8> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <64 x i8> %elt.head, <64 x i8> poison, <64 x i32> zeroinitializer
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> %allones, i32 %evl)
-  %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> %allones, <64 x i8> %y, <64 x i8> %c, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> splat (i1 -1), <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
 
@@ -585,10 +517,8 @@ define <64 x i8> @vmacc_vv_nxv64i8_ta(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c,
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> %allones, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> splat (i1 -1), i32 %evl)
   %u = call <64 x i8> @llvm.vp.select.nxv64i8(<64 x i1> %m, <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
@@ -602,10 +532,8 @@ define <64 x i8> @vmacc_vx_nxv64i8_ta(<64 x i8> %a, i8 %b, <64 x i8> %c,  <64 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <64 x i8> %elt.head, <64 x i8> poison, <64 x i32> zeroinitializer
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> %allones, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.add.nxv64i8(<64 x i8> %x, <64 x i8> %c, <64 x i1> splat (i1 -1), i32 %evl)
   %u = call <64 x i8> @llvm.vp.select.nxv64i8(<64 x i1> %m, <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
@@ -622,10 +550,8 @@ define <2 x i16> @vmacc_vv_nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c,  <2
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> %m, <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
@@ -637,11 +563,9 @@ define <2 x i16> @vmacc_vv_nxv2i16_unmasked(<2 x i16> %a, <2 x i16> %b, <2 x i16
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> %allones, <2 x i16> %y, <2 x i16> %c, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> splat (i1 -1), <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
 
@@ -654,10 +578,8 @@ define <2 x i16> @vmacc_vx_nxv2i16(<2 x i16> %a, i16 %b, <2 x i16> %c,  <2 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> %m, <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
@@ -671,11 +593,9 @@ define <2 x i16> @vmacc_vx_nxv2i16_unmasked(<2 x i16> %a, i16 %b, <2 x i16> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> %allones, <2 x i16> %y, <2 x i16> %c, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> splat (i1 -1), <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
 
@@ -686,10 +606,8 @@ define <2 x i16> @vmacc_vv_nxv2i16_ta(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i16> @llvm.vp.select.nxv2i16(<2 x i1> %m, <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
@@ -703,10 +621,8 @@ define <2 x i16> @vmacc_vx_nxv2i16_ta(<2 x i16> %a, i16 %b, <2 x i16> %c,  <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.add.nxv2i16(<2 x i16> %x, <2 x i16> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i16> @llvm.vp.select.nxv2i16(<2 x i1> %m, <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
@@ -723,10 +639,8 @@ define <4 x i16> @vmacc_vv_nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c,  <4
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> %m, <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
@@ -738,11 +652,9 @@ define <4 x i16> @vmacc_vv_nxv4i16_unmasked(<4 x i16> %a, <4 x i16> %b, <4 x i16
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> %allones, <4 x i16> %y, <4 x i16> %c, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> splat (i1 -1), <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
 
@@ -755,10 +667,8 @@ define <4 x i16> @vmacc_vx_nxv4i16(<4 x i16> %a, i16 %b, <4 x i16> %c,  <4 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> %m, <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
@@ -772,11 +682,9 @@ define <4 x i16> @vmacc_vx_nxv4i16_unmasked(<4 x i16> %a, i16 %b, <4 x i16> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> %allones, <4 x i16> %y, <4 x i16> %c, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> splat (i1 -1), <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
 
@@ -787,10 +695,8 @@ define <4 x i16> @vmacc_vv_nxv4i16_ta(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i16> @llvm.vp.select.nxv4i16(<4 x i1> %m, <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
@@ -804,10 +710,8 @@ define <4 x i16> @vmacc_vx_nxv4i16_ta(<4 x i16> %a, i16 %b, <4 x i16> %c,  <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.add.nxv4i16(<4 x i16> %x, <4 x i16> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i16> @llvm.vp.select.nxv4i16(<4 x i1> %m, <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
@@ -824,10 +728,8 @@ define <8 x i16> @vmacc_vv_nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c,  <8
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> %m, <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
@@ -839,11 +741,9 @@ define <8 x i16> @vmacc_vv_nxv8i16_unmasked(<8 x i16> %a, <8 x i16> %b, <8 x i16
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> %allones, <8 x i16> %y, <8 x i16> %c, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> splat (i1 -1), <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
 
@@ -856,10 +756,8 @@ define <8 x i16> @vmacc_vx_nxv8i16(<8 x i16> %a, i16 %b, <8 x i16> %c,  <8 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> %m, <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
@@ -873,11 +771,9 @@ define <8 x i16> @vmacc_vx_nxv8i16_unmasked(<8 x i16> %a, i16 %b, <8 x i16> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> %allones, <8 x i16> %y, <8 x i16> %c, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> splat (i1 -1), <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
 
@@ -888,10 +784,8 @@ define <8 x i16> @vmacc_vv_nxv8i16_ta(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i16> @llvm.vp.select.nxv8i16(<8 x i1> %m, <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
@@ -905,10 +799,8 @@ define <8 x i16> @vmacc_vx_nxv8i16_ta(<8 x i16> %a, i16 %b, <8 x i16> %c,  <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.add.nxv8i16(<8 x i16> %x, <8 x i16> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i16> @llvm.vp.select.nxv8i16(<8 x i1> %m, <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
@@ -925,10 +817,8 @@ define <16 x i16> @vmacc_vv_nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> %m, <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
@@ -940,11 +830,9 @@ define <16 x i16> @vmacc_vv_nxv16i16_unmasked(<16 x i16> %a, <16 x i16> %b, <16
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> %allones, <16 x i16> %y, <16 x i16> %c, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> splat (i1 -1), <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
 
@@ -957,10 +845,8 @@ define <16 x i16> @vmacc_vx_nxv16i16(<16 x i16> %a, i16 %b, <16 x i16> %c,  <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> %m, <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
@@ -974,11 +860,9 @@ define <16 x i16> @vmacc_vx_nxv16i16_unmasked(<16 x i16> %a, i16 %b, <16 x i16>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> %allones, <16 x i16> %y, <16 x i16> %c, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> splat (i1 -1), <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
 
@@ -989,10 +873,8 @@ define <16 x i16> @vmacc_vv_nxv16i16_ta(<16 x i16> %a, <16 x i16> %b, <16 x i16>
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i16> @llvm.vp.select.nxv16i16(<16 x i1> %m, <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
@@ -1006,10 +888,8 @@ define <16 x i16> @vmacc_vx_nxv16i16_ta(<16 x i16> %a, i16 %b, <16 x i16> %c,  <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.add.nxv16i16(<16 x i16> %x, <16 x i16> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i16> @llvm.vp.select.nxv16i16(<16 x i1> %m, <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
@@ -1026,10 +906,8 @@ define <32 x i16> @vmacc_vv_nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> %m, <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
@@ -1041,11 +919,9 @@ define <32 x i16> @vmacc_vv_nxv32i16_unmasked(<32 x i16> %a, <32 x i16> %b, <32
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> %allones, <32 x i16> %y, <32 x i16> %c, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> splat (i1 -1), <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
 
@@ -1058,10 +934,8 @@ define <32 x i16> @vmacc_vx_nxv32i16(<32 x i16> %a, i16 %b, <32 x i16> %c,  <32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <32 x i16> %elt.head, <32 x i16> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> %m, <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
@@ -1075,11 +949,9 @@ define <32 x i16> @vmacc_vx_nxv32i16_unmasked(<32 x i16> %a, i16 %b, <32 x i16>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <32 x i16> %elt.head, <32 x i16> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> %allones, <32 x i16> %y, <32 x i16> %c, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> splat (i1 -1), <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
 
@@ -1090,10 +962,8 @@ define <32 x i16> @vmacc_vv_nxv32i16_ta(<32 x i16> %a, <32 x i16> %b, <32 x i16>
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i16> @llvm.vp.select.nxv32i16(<32 x i1> %m, <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
@@ -1107,10 +977,8 @@ define <32 x i16> @vmacc_vx_nxv32i16_ta(<32 x i16> %a, i16 %b, <32 x i16> %c,  <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <32 x i16> %elt.head, <32 x i16> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.add.nxv32i16(<32 x i16> %x, <32 x i16> %c, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i16> @llvm.vp.select.nxv32i16(<32 x i1> %m, <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
@@ -1127,10 +995,8 @@ define <2 x i32> @vmacc_vv_nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c,  <2
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> %m, <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
@@ -1142,11 +1008,9 @@ define <2 x i32> @vmacc_vv_nxv2i32_unmasked(<2 x i32> %a, <2 x i32> %b, <2 x i32
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> %allones, <2 x i32> %y, <2 x i32> %c, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> splat (i1 -1), <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
 
@@ -1159,10 +1023,8 @@ define <2 x i32> @vmacc_vx_nxv2i32(<2 x i32> %a, i32 %b, <2 x i32> %c,  <2 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> %m, <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
@@ -1176,11 +1038,9 @@ define <2 x i32> @vmacc_vx_nxv2i32_unmasked(<2 x i32> %a, i32 %b, <2 x i32> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> %allones, <2 x i32> %y, <2 x i32> %c, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> splat (i1 -1), <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
 
@@ -1191,10 +1051,8 @@ define <2 x i32> @vmacc_vv_nxv2i32_ta(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i32> @llvm.vp.select.nxv2i32(<2 x i1> %m, <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
@@ -1208,10 +1066,8 @@ define <2 x i32> @vmacc_vx_nxv2i32_ta(<2 x i32> %a, i32 %b, <2 x i32> %c,  <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.add.nxv2i32(<2 x i32> %x, <2 x i32> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i32> @llvm.vp.select.nxv2i32(<2 x i1> %m, <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
@@ -1228,10 +1084,8 @@ define <4 x i32> @vmacc_vv_nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c,  <4
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> %m, <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
@@ -1243,11 +1097,9 @@ define <4 x i32> @vmacc_vv_nxv4i32_unmasked(<4 x i32> %a, <4 x i32> %b, <4 x i32
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> %allones, <4 x i32> %y, <4 x i32> %c, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> splat (i1 -1), <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
 
@@ -1260,10 +1112,8 @@ define <4 x i32> @vmacc_vx_nxv4i32(<4 x i32> %a, i32 %b, <4 x i32> %c,  <4 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> %m, <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
@@ -1277,11 +1127,9 @@ define <4 x i32> @vmacc_vx_nxv4i32_unmasked(<4 x i32> %a, i32 %b, <4 x i32> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> %allones, <4 x i32> %y, <4 x i32> %c, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> splat (i1 -1), <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
 
@@ -1292,10 +1140,8 @@ define <4 x i32> @vmacc_vv_nxv4i32_ta(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i32> @llvm.vp.select.nxv4i32(<4 x i1> %m, <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
@@ -1309,10 +1155,8 @@ define <4 x i32> @vmacc_vx_nxv4i32_ta(<4 x i32> %a, i32 %b, <4 x i32> %c,  <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.add.nxv4i32(<4 x i32> %x, <4 x i32> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i32> @llvm.vp.select.nxv4i32(<4 x i1> %m, <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
@@ -1329,10 +1173,8 @@ define <8 x i32> @vmacc_vv_nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c,  <8
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> %m, <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
@@ -1344,11 +1186,9 @@ define <8 x i32> @vmacc_vv_nxv8i32_unmasked(<8 x i32> %a, <8 x i32> %b, <8 x i32
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> %allones, <8 x i32> %y, <8 x i32> %c, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> splat (i1 -1), <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
 
@@ -1361,10 +1201,8 @@ define <8 x i32> @vmacc_vx_nxv8i32(<8 x i32> %a, i32 %b, <8 x i32> %c,  <8 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> %m, <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
@@ -1378,11 +1216,9 @@ define <8 x i32> @vmacc_vx_nxv8i32_unmasked(<8 x i32> %a, i32 %b, <8 x i32> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> %allones, <8 x i32> %y, <8 x i32> %c, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> splat (i1 -1), <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
 
@@ -1393,10 +1229,8 @@ define <8 x i32> @vmacc_vv_nxv8i32_ta(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c,
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i32> @llvm.vp.select.nxv8i32(<8 x i1> %m, <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
@@ -1410,10 +1244,8 @@ define <8 x i32> @vmacc_vx_nxv8i32_ta(<8 x i32> %a, i32 %b, <8 x i32> %c,  <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.add.nxv8i32(<8 x i32> %x, <8 x i32> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i32> @llvm.vp.select.nxv8i32(<8 x i1> %m, <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
@@ -1430,10 +1262,8 @@ define <16 x i32> @vmacc_vv_nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> %m, <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
@@ -1445,11 +1275,9 @@ define <16 x i32> @vmacc_vv_nxv16i32_unmasked(<16 x i32> %a, <16 x i32> %b, <16
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> %allones, <16 x i32> %y, <16 x i32> %c, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> splat (i1 -1), <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
 
@@ -1462,10 +1290,8 @@ define <16 x i32> @vmacc_vx_nxv16i32(<16 x i32> %a, i32 %b, <16 x i32> %c,  <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> %m, <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
@@ -1479,11 +1305,9 @@ define <16 x i32> @vmacc_vx_nxv16i32_unmasked(<16 x i32> %a, i32 %b, <16 x i32>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> %allones, <16 x i32> %y, <16 x i32> %c, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> splat (i1 -1), <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
 
@@ -1494,10 +1318,8 @@ define <16 x i32> @vmacc_vv_nxv16i32_ta(<16 x i32> %a, <16 x i32> %b, <16 x i32>
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i32> @llvm.vp.select.nxv16i32(<16 x i1> %m, <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
@@ -1511,10 +1333,8 @@ define <16 x i32> @vmacc_vx_nxv16i32_ta(<16 x i32> %a, i32 %b, <16 x i32> %c,  <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.add.nxv16i32(<16 x i32> %x, <16 x i32> %c, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i32> @llvm.vp.select.nxv16i32(<16 x i1> %m, <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
@@ -1531,10 +1351,8 @@ define <2 x i64> @vmacc_vv_nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c,  <2
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> %m, <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
@@ -1546,11 +1364,9 @@ define <2 x i64> @vmacc_vv_nxv2i64_unmasked(<2 x i64> %a, <2 x i64> %b, <2 x i64
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> %allones, <2 x i64> %y, <2 x i64> %c, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> splat (i1 -1), <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
 
@@ -1578,10 +1394,8 @@ define <2 x i64> @vmacc_vx_nxv2i64(<2 x i64> %a, i64 %b, <2 x i64> %c,  <2 x i1>
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> %m, <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
@@ -1610,11 +1424,9 @@ define <2 x i64> @vmacc_vx_nxv2i64_unmasked(<2 x i64> %a, i64 %b, <2 x i64> %c,
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> %allones, <2 x i64> %y, <2 x i64> %c, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> splat (i1 -1), <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
 
@@ -1625,10 +1437,8 @@ define <2 x i64> @vmacc_vv_nxv2i64_ta(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c,
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i64> @llvm.vp.select.nxv2i64(<2 x i1> %m, <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
@@ -1657,10 +1467,8 @@ define <2 x i64> @vmacc_vx_nxv2i64_ta(<2 x i64> %a, i64 %b, <2 x i64> %c,  <2 x
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.add.nxv2i64(<2 x i64> %x, <2 x i64> %c, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i64> @llvm.vp.select.nxv2i64(<2 x i1> %m, <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
@@ -1677,10 +1485,8 @@ define <4 x i64> @vmacc_vv_nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c,  <4
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> %m, <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
@@ -1692,11 +1498,9 @@ define <4 x i64> @vmacc_vv_nxv4i64_unmasked(<4 x i64> %a, <4 x i64> %b, <4 x i64
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> %allones, <4 x i64> %y, <4 x i64> %c, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> splat (i1 -1), <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
 
@@ -1724,10 +1528,8 @@ define <4 x i64> @vmacc_vx_nxv4i64(<4 x i64> %a, i64 %b, <4 x i64> %c,  <4 x i1>
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> %m, <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
@@ -1756,11 +1558,9 @@ define <4 x i64> @vmacc_vx_nxv4i64_unmasked(<4 x i64> %a, i64 %b, <4 x i64> %c,
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> %allones, <4 x i64> %y, <4 x i64> %c, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> splat (i1 -1), <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
 
@@ -1771,10 +1571,8 @@ define <4 x i64> @vmacc_vv_nxv4i64_ta(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c,
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i64> @llvm.vp.select.nxv4i64(<4 x i1> %m, <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
@@ -1803,10 +1601,8 @@ define <4 x i64> @vmacc_vx_nxv4i64_ta(<4 x i64> %a, i64 %b, <4 x i64> %c,  <4 x
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.add.nxv4i64(<4 x i64> %x, <4 x i64> %c, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i64> @llvm.vp.select.nxv4i64(<4 x i1> %m, <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
@@ -1823,10 +1619,8 @@ define <8 x i64> @vmacc_vv_nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c,  <8
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> %m, <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
@@ -1838,11 +1632,9 @@ define <8 x i64> @vmacc_vv_nxv8i64_unmasked(<8 x i64> %a, <8 x i64> %b, <8 x i64
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> %allones, <8 x i64> %y, <8 x i64> %c, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> splat (i1 -1), <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
 
@@ -1870,10 +1662,8 @@ define <8 x i64> @vmacc_vx_nxv8i64(<8 x i64> %a, i64 %b, <8 x i64> %c,  <8 x i1>
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> %m, <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
@@ -1902,11 +1692,9 @@ define <8 x i64> @vmacc_vx_nxv8i64_unmasked(<8 x i64> %a, i64 %b, <8 x i64> %c,
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> %allones, <8 x i64> %y, <8 x i64> %c, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> splat (i1 -1), <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
 
@@ -1917,10 +1705,8 @@ define <8 x i64> @vmacc_vv_nxv8i64_ta(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c,
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i64> @llvm.vp.select.nxv8i64(<8 x i1> %m, <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
@@ -1949,10 +1735,8 @@ define <8 x i64> @vmacc_vx_nxv8i64_ta(<8 x i64> %a, i64 %b, <8 x i64> %c,  <8 x
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.add.nxv8i64(<8 x i64> %x, <8 x i64> %c, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i64> @llvm.vp.select.nxv8i64(<8 x i1> %m, <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index 6af5ba185b8b8b..3db44e87109bd4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -39,9 +39,7 @@ define <2 x i8> @vmax_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -65,9 +63,7 @@ define <2 x i8> @vmax_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -89,9 +85,7 @@ define <4 x i8> @vmax_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -127,9 +121,7 @@ define <4 x i8> @vmax_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -151,9 +143,7 @@ define <5 x i8> @vmax_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -177,9 +167,7 @@ define <5 x i8> @vmax_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -201,9 +189,7 @@ define <8 x i8> @vmax_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -227,9 +213,7 @@ define <8 x i8> @vmax_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -251,9 +235,7 @@ define <16 x i8> @vmax_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -277,9 +259,7 @@ define <16 x i8> @vmax_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -332,9 +312,7 @@ define <256 x i8> @vmax_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.smax.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.smax.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -390,9 +368,7 @@ define <2 x i16> @vmax_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -416,9 +392,7 @@ define <2 x i16> @vmax_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -440,9 +414,7 @@ define <4 x i16> @vmax_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -466,9 +438,7 @@ define <4 x i16> @vmax_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -490,9 +460,7 @@ define <8 x i16> @vmax_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -516,9 +484,7 @@ define <8 x i16> @vmax_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -540,9 +506,7 @@ define <16 x i16> @vmax_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -566,9 +530,7 @@ define <16 x i16> @vmax_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -590,9 +552,7 @@ define <2 x i32> @vmax_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -616,9 +576,7 @@ define <2 x i32> @vmax_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -640,9 +598,7 @@ define <4 x i32> @vmax_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -666,9 +622,7 @@ define <4 x i32> @vmax_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -690,9 +644,7 @@ define <8 x i32> @vmax_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -716,9 +668,7 @@ define <8 x i32> @vmax_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -740,9 +690,7 @@ define <16 x i32> @vmax_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -766,9 +714,7 @@ define <16 x i32> @vmax_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -790,9 +736,7 @@ define <2 x i64> @vmax_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -844,9 +788,7 @@ define <2 x i64> @vmax_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -868,9 +810,7 @@ define <4 x i64> @vmax_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -922,9 +862,7 @@ define <4 x i64> @vmax_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -946,9 +884,7 @@ define <8 x i64> @vmax_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1000,9 +936,7 @@ define <8 x i64> @vmax_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1024,9 +958,7 @@ define <16 x i64> @vmax_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1078,9 +1010,7 @@ define <16 x i64> @vmax_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1092,16 +1022,16 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-LABEL: vmax_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB74_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB74_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1134,8 +1064,6 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vmax.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.smax.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.smax.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index 12c6410068c693..c97c2232715f56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -38,9 +38,7 @@ define <2 x i8> @vmaxu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -64,9 +62,7 @@ define <2 x i8> @vmaxu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -88,9 +84,7 @@ define <4 x i8> @vmaxu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -126,9 +120,7 @@ define <4 x i8> @vmaxu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -150,9 +142,7 @@ define <5 x i8> @vmaxu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -176,9 +166,7 @@ define <5 x i8> @vmaxu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -200,9 +188,7 @@ define <8 x i8> @vmaxu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -226,9 +212,7 @@ define <8 x i8> @vmaxu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -250,9 +234,7 @@ define <16 x i8> @vmaxu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -276,9 +258,7 @@ define <16 x i8> @vmaxu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -331,9 +311,7 @@ define <256 x i8> @vmaxu_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.umax.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.umax.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -389,9 +367,7 @@ define <2 x i16> @vmaxu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -415,9 +391,7 @@ define <2 x i16> @vmaxu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -439,9 +413,7 @@ define <4 x i16> @vmaxu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -465,9 +437,7 @@ define <4 x i16> @vmaxu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -489,9 +459,7 @@ define <8 x i16> @vmaxu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -515,9 +483,7 @@ define <8 x i16> @vmaxu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -539,9 +505,7 @@ define <16 x i16> @vmaxu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -565,9 +529,7 @@ define <16 x i16> @vmaxu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -589,9 +551,7 @@ define <2 x i32> @vmaxu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -615,9 +575,7 @@ define <2 x i32> @vmaxu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -639,9 +597,7 @@ define <4 x i32> @vmaxu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -665,9 +621,7 @@ define <4 x i32> @vmaxu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -689,9 +643,7 @@ define <8 x i32> @vmaxu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -715,9 +667,7 @@ define <8 x i32> @vmaxu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -739,9 +689,7 @@ define <16 x i32> @vmaxu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -765,9 +713,7 @@ define <16 x i32> @vmaxu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -789,9 +735,7 @@ define <2 x i64> @vmaxu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -843,9 +787,7 @@ define <2 x i64> @vmaxu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -867,9 +809,7 @@ define <4 x i64> @vmaxu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -921,9 +861,7 @@ define <4 x i64> @vmaxu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -945,9 +883,7 @@ define <8 x i64> @vmaxu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -999,9 +935,7 @@ define <8 x i64> @vmaxu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1023,9 +957,7 @@ define <16 x i64> @vmaxu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1077,9 +1009,7 @@ define <16 x i64> @vmaxu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1091,16 +1021,16 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-LABEL: vmaxu_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB74_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB74_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1133,8 +1063,6 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vmaxu.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.umax.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.umax.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index f5b9421d28c349..eaa19110a2a28c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -39,9 +39,7 @@ define <2 x i8> @vmin_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -65,9 +63,7 @@ define <2 x i8> @vmin_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -89,9 +85,7 @@ define <4 x i8> @vmin_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -127,9 +121,7 @@ define <4 x i8> @vmin_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -151,9 +143,7 @@ define <5 x i8> @vmin_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -177,9 +167,7 @@ define <5 x i8> @vmin_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -201,9 +189,7 @@ define <8 x i8> @vmin_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -227,9 +213,7 @@ define <8 x i8> @vmin_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -251,9 +235,7 @@ define <16 x i8> @vmin_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -277,9 +259,7 @@ define <16 x i8> @vmin_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -332,9 +312,7 @@ define <256 x i8> @vmin_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.smin.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.smin.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -390,9 +368,7 @@ define <2 x i16> @vmin_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -416,9 +392,7 @@ define <2 x i16> @vmin_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -440,9 +414,7 @@ define <4 x i16> @vmin_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -466,9 +438,7 @@ define <4 x i16> @vmin_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -490,9 +460,7 @@ define <8 x i16> @vmin_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -516,9 +484,7 @@ define <8 x i16> @vmin_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -540,9 +506,7 @@ define <16 x i16> @vmin_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -566,9 +530,7 @@ define <16 x i16> @vmin_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -590,9 +552,7 @@ define <2 x i32> @vmin_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -616,9 +576,7 @@ define <2 x i32> @vmin_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -640,9 +598,7 @@ define <4 x i32> @vmin_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -666,9 +622,7 @@ define <4 x i32> @vmin_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -690,9 +644,7 @@ define <8 x i32> @vmin_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -716,9 +668,7 @@ define <8 x i32> @vmin_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -740,9 +690,7 @@ define <16 x i32> @vmin_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -766,9 +714,7 @@ define <16 x i32> @vmin_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -790,9 +736,7 @@ define <2 x i64> @vmin_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -844,9 +788,7 @@ define <2 x i64> @vmin_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -868,9 +810,7 @@ define <4 x i64> @vmin_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -922,9 +862,7 @@ define <4 x i64> @vmin_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -946,9 +884,7 @@ define <8 x i64> @vmin_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1000,9 +936,7 @@ define <8 x i64> @vmin_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1024,9 +958,7 @@ define <16 x i64> @vmin_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1078,9 +1010,7 @@ define <16 x i64> @vmin_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1092,16 +1022,16 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-LABEL: vmin_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB74_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB74_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1134,8 +1064,6 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vmin.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.smin.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.smin.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index d07580efceb50b..48175e5b905ba5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -38,9 +38,7 @@ define <2 x i8> @vminu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -64,9 +62,7 @@ define <2 x i8> @vminu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -88,9 +84,7 @@ define <4 x i8> @vminu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -126,9 +120,7 @@ define <4 x i8> @vminu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -150,9 +142,7 @@ define <5 x i8> @vminu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -176,9 +166,7 @@ define <5 x i8> @vminu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -200,9 +188,7 @@ define <8 x i8> @vminu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -226,9 +212,7 @@ define <8 x i8> @vminu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -250,9 +234,7 @@ define <16 x i8> @vminu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -276,9 +258,7 @@ define <16 x i8> @vminu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -331,9 +311,7 @@ define <256 x i8> @vminu_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.umin.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.umin.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -389,9 +367,7 @@ define <2 x i16> @vminu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -415,9 +391,7 @@ define <2 x i16> @vminu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -439,9 +413,7 @@ define <4 x i16> @vminu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -465,9 +437,7 @@ define <4 x i16> @vminu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -489,9 +459,7 @@ define <8 x i16> @vminu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -515,9 +483,7 @@ define <8 x i16> @vminu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -539,9 +505,7 @@ define <16 x i16> @vminu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -565,9 +529,7 @@ define <16 x i16> @vminu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -589,9 +551,7 @@ define <2 x i32> @vminu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -615,9 +575,7 @@ define <2 x i32> @vminu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -639,9 +597,7 @@ define <4 x i32> @vminu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -665,9 +621,7 @@ define <4 x i32> @vminu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -689,9 +643,7 @@ define <8 x i32> @vminu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -715,9 +667,7 @@ define <8 x i32> @vminu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -739,9 +689,7 @@ define <16 x i32> @vminu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -765,9 +713,7 @@ define <16 x i32> @vminu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -789,9 +735,7 @@ define <2 x i64> @vminu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -843,9 +787,7 @@ define <2 x i64> @vminu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -867,9 +809,7 @@ define <4 x i64> @vminu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -921,9 +861,7 @@ define <4 x i64> @vminu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -945,9 +883,7 @@ define <8 x i64> @vminu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -999,9 +935,7 @@ define <8 x i64> @vminu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1023,9 +957,7 @@ define <16 x i64> @vminu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1077,9 +1009,7 @@ define <16 x i64> @vminu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1091,16 +1021,16 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-LABEL: vminu_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB74_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB74_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1133,8 +1063,6 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vminu.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.umin.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.umin.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
index fbb97d6bf32299..2525ac03ea9a87 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
@@ -34,9 +34,7 @@ define <2 x i8> @vmul_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.mul.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.mul.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -60,9 +58,7 @@ define <2 x i8> @vmul_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.mul.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.mul.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -84,9 +80,7 @@ define <4 x i8> @vmul_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.mul.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.mul.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -110,9 +104,7 @@ define <4 x i8> @vmul_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.mul.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.mul.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -134,9 +126,7 @@ define <8 x i8> @vmul_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.mul.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.mul.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -160,9 +150,7 @@ define <8 x i8> @vmul_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.mul.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.mul.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -184,9 +172,7 @@ define <16 x i8> @vmul_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.mul.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.mul.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -210,9 +196,7 @@ define <16 x i8> @vmul_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.mul.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.mul.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -234,9 +218,7 @@ define <2 x i16> @vmul_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.mul.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.mul.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -260,9 +242,7 @@ define <2 x i16> @vmul_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.mul.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.mul.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -284,9 +264,7 @@ define <4 x i16> @vmul_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.mul.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.mul.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -310,9 +288,7 @@ define <4 x i16> @vmul_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.mul.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.mul.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -334,9 +310,7 @@ define <8 x i16> @vmul_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.mul.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.mul.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -372,9 +346,7 @@ define <8 x i16> @vmul_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.mul.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.mul.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -396,9 +368,7 @@ define <12 x i16> @vmul_vv_v12i16_unmasked(<12 x i16> %va, <12 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <12 x i1> poison, i1 true, i32 0
-  %m = shufflevector <12 x i1> %head, <12 x i1> poison, <12 x i32> zeroinitializer
-  %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 %evl)
+  %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> splat (i1 true), i32 %evl)
   ret <12 x i16> %v
 }
 
@@ -422,9 +392,7 @@ define <12 x i16> @vmul_vx_v12i16_unmasked(<12 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <12 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <12 x i16> %elt.head, <12 x i16> poison, <12 x i32> zeroinitializer
-  %head = insertelement <12 x i1> poison, i1 true, i32 0
-  %m = shufflevector <12 x i1> %head, <12 x i1> poison, <12 x i32> zeroinitializer
-  %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %vb, <12 x i1> %m, i32 %evl)
+  %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %vb, <12 x i1> splat (i1 true), i32 %evl)
   ret <12 x i16> %v
 }
 
@@ -446,9 +414,7 @@ define <16 x i16> @vmul_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.mul.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.mul.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -472,9 +438,7 @@ define <16 x i16> @vmul_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.mul.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.mul.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -496,9 +460,7 @@ define <2 x i32> @vmul_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.mul.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.mul.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -522,9 +484,7 @@ define <2 x i32> @vmul_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.mul.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.mul.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -546,9 +506,7 @@ define <4 x i32> @vmul_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -572,9 +530,7 @@ define <4 x i32> @vmul_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -596,9 +552,7 @@ define <8 x i32> @vmul_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.mul.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.mul.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -622,9 +576,7 @@ define <8 x i32> @vmul_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.mul.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.mul.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -646,9 +598,7 @@ define <16 x i32> @vmul_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.mul.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.mul.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -672,9 +622,7 @@ define <16 x i32> @vmul_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.mul.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.mul.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -696,9 +644,7 @@ define <2 x i64> @vmul_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.mul.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.mul.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -750,9 +696,7 @@ define <2 x i64> @vmul_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.mul.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.mul.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -774,9 +718,7 @@ define <4 x i64> @vmul_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.mul.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.mul.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -828,9 +770,7 @@ define <4 x i64> @vmul_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.mul.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.mul.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -852,9 +792,7 @@ define <8 x i64> @vmul_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.mul.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.mul.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -906,9 +844,7 @@ define <8 x i64> @vmul_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.mul.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.mul.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -930,9 +866,7 @@ define <16 x i64> @vmul_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.mul.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.mul.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -984,8 +918,6 @@ define <16 x i64> @vmul_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.mul.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.mul.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll
index 744ed991bccc29..695fba6d54e033 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnmsac-vp.ll
@@ -16,10 +16,8 @@ define <2 x i8> @vnmsac_vv_nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c,  <2 x i
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> %m, <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
@@ -31,11 +29,9 @@ define <2 x i8> @vnmsac_vv_nxv2i8_unmasked(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> %allones, <2 x i8> %y, <2 x i8> %c, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> splat (i1 -1), <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
 
@@ -48,10 +44,8 @@ define <2 x i8> @vnmsac_vx_nxv2i8(<2 x i8> %a, i8 %b, <2 x i8> %c,  <2 x i1> %m,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> %m, <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
@@ -65,11 +59,9 @@ define <2 x i8> @vnmsac_vx_nxv2i8_unmasked(<2 x i8> %a, i8 %b, <2 x i8> %c,  <2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> %allones, <2 x i8> %y, <2 x i8> %c, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i8> @llvm.vp.merge.nxv2i8(<2 x i1> splat (i1 -1), <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
 
@@ -80,10 +72,8 @@ define <2 x i8> @vnmsac_vv_nxv2i8_ta(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c,  <2
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i8> @llvm.vp.select.nxv2i8(<2 x i1> %m, <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
@@ -97,10 +87,8 @@ define <2 x i8> @vnmsac_vx_nxv2i8_ta(<2 x i8> %a, i8 %b, <2 x i8> %c,  <2 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i8> @llvm.vp.mul.nxv2i8(<2 x i8> %a, <2 x i8> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i8> @llvm.vp.sub.nxv2i8(<2 x i8> %c, <2 x i8> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i8> @llvm.vp.select.nxv2i8(<2 x i1> %m, <2 x i8> %y, <2 x i8> %c, i32 %evl)
   ret <2 x i8> %u
 }
@@ -117,10 +105,8 @@ define <4 x i8> @vnmsac_vv_nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c,  <4 x i
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> %m, <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
@@ -132,11 +118,9 @@ define <4 x i8> @vnmsac_vv_nxv4i8_unmasked(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> %allones, <4 x i8> %y, <4 x i8> %c, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> splat (i1 -1), <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
 
@@ -149,10 +133,8 @@ define <4 x i8> @vnmsac_vx_nxv4i8(<4 x i8> %a, i8 %b, <4 x i8> %c,  <4 x i1> %m,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> %m, <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
@@ -166,11 +148,9 @@ define <4 x i8> @vnmsac_vx_nxv4i8_unmasked(<4 x i8> %a, i8 %b, <4 x i8> %c,  <4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> %allones, <4 x i8> %y, <4 x i8> %c, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i8> @llvm.vp.merge.nxv4i8(<4 x i1> splat (i1 -1), <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
 
@@ -181,10 +161,8 @@ define <4 x i8> @vnmsac_vv_nxv4i8_ta(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c,  <4
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i8> @llvm.vp.select.nxv4i8(<4 x i1> %m, <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
@@ -198,10 +176,8 @@ define <4 x i8> @vnmsac_vx_nxv4i8_ta(<4 x i8> %a, i8 %b, <4 x i8> %c,  <4 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i8> @llvm.vp.mul.nxv4i8(<4 x i8> %a, <4 x i8> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i8> @llvm.vp.sub.nxv4i8(<4 x i8> %c, <4 x i8> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i8> @llvm.vp.select.nxv4i8(<4 x i1> %m, <4 x i8> %y, <4 x i8> %c, i32 %evl)
   ret <4 x i8> %u
 }
@@ -218,10 +194,8 @@ define <8 x i8> @vnmsac_vv_nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c,  <8 x i
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> %m, <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
@@ -233,11 +207,9 @@ define <8 x i8> @vnmsac_vv_nxv8i8_unmasked(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> %allones, <8 x i8> %y, <8 x i8> %c, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> splat (i1 -1), <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
 
@@ -250,10 +222,8 @@ define <8 x i8> @vnmsac_vx_nxv8i8(<8 x i8> %a, i8 %b, <8 x i8> %c,  <8 x i1> %m,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> %m, <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
@@ -267,11 +237,9 @@ define <8 x i8> @vnmsac_vx_nxv8i8_unmasked(<8 x i8> %a, i8 %b, <8 x i8> %c,  <8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> %allones, <8 x i8> %y, <8 x i8> %c, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i8> @llvm.vp.merge.nxv8i8(<8 x i1> splat (i1 -1), <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
 
@@ -282,10 +250,8 @@ define <8 x i8> @vnmsac_vv_nxv8i8_ta(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c,  <8
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i8> @llvm.vp.select.nxv8i8(<8 x i1> %m, <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
@@ -299,10 +265,8 @@ define <8 x i8> @vnmsac_vx_nxv8i8_ta(<8 x i8> %a, i8 %b, <8 x i8> %c,  <8 x i1>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i8> @llvm.vp.mul.nxv8i8(<8 x i8> %a, <8 x i8> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i8> @llvm.vp.sub.nxv8i8(<8 x i8> %c, <8 x i8> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i8> @llvm.vp.select.nxv8i8(<8 x i1> %m, <8 x i8> %y, <8 x i8> %c, i32 %evl)
   ret <8 x i8> %u
 }
@@ -319,10 +283,8 @@ define <16 x i8> @vnmsac_vv_nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> %m, <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
@@ -334,11 +296,9 @@ define <16 x i8> @vnmsac_vv_nxv16i8_unmasked(<16 x i8> %a, <16 x i8> %b, <16 x i
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> %allones, <16 x i8> %y, <16 x i8> %c, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> splat (i1 -1), <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
 
@@ -351,10 +311,8 @@ define <16 x i8> @vnmsac_vx_nxv16i8(<16 x i8> %a, i8 %b, <16 x i8> %c,  <16 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> %m, <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
@@ -368,11 +326,9 @@ define <16 x i8> @vnmsac_vx_nxv16i8_unmasked(<16 x i8> %a, i8 %b, <16 x i8> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> %allones, <16 x i8> %y, <16 x i8> %c, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i8> @llvm.vp.merge.nxv16i8(<16 x i1> splat (i1 -1), <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
 
@@ -383,10 +339,8 @@ define <16 x i8> @vnmsac_vv_nxv16i8_ta(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c,
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i8> @llvm.vp.select.nxv16i8(<16 x i1> %m, <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
@@ -400,10 +354,8 @@ define <16 x i8> @vnmsac_vx_nxv16i8_ta(<16 x i8> %a, i8 %b, <16 x i8> %c,  <16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i8> @llvm.vp.mul.nxv16i8(<16 x i8> %a, <16 x i8> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i8> @llvm.vp.sub.nxv16i8(<16 x i8> %c, <16 x i8> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i8> @llvm.vp.select.nxv16i8(<16 x i1> %m, <16 x i8> %y, <16 x i8> %c, i32 %evl)
   ret <16 x i8> %u
 }
@@ -420,10 +372,8 @@ define <32 x i8> @vnmsac_vv_nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> %m, <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
@@ -435,11 +385,9 @@ define <32 x i8> @vnmsac_vv_nxv32i8_unmasked(<32 x i8> %a, <32 x i8> %b, <32 x i
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> %allones, <32 x i8> %y, <32 x i8> %c, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> splat (i1 -1), <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
 
@@ -452,10 +400,8 @@ define <32 x i8> @vnmsac_vx_nxv32i8(<32 x i8> %a, i8 %b, <32 x i8> %c,  <32 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <32 x i8> %elt.head, <32 x i8> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> %m, <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
@@ -469,11 +415,9 @@ define <32 x i8> @vnmsac_vx_nxv32i8_unmasked(<32 x i8> %a, i8 %b, <32 x i8> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <32 x i8> %elt.head, <32 x i8> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> %allones, <32 x i8> %y, <32 x i8> %c, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x i8> @llvm.vp.merge.nxv32i8(<32 x i1> splat (i1 -1), <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
 
@@ -484,10 +428,8 @@ define <32 x i8> @vnmsac_vv_nxv32i8_ta(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c,
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i8> @llvm.vp.select.nxv32i8(<32 x i1> %m, <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
@@ -501,10 +443,8 @@ define <32 x i8> @vnmsac_vx_nxv32i8_ta(<32 x i8> %a, i8 %b, <32 x i8> %c,  <32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <32 x i8> %elt.head, <32 x i8> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i8> @llvm.vp.mul.nxv32i8(<32 x i8> %a, <32 x i8> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i8> @llvm.vp.sub.nxv32i8(<32 x i8> %c, <32 x i8> %x, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i8> @llvm.vp.select.nxv32i8(<32 x i1> %m, <32 x i8> %y, <32 x i8> %c, i32 %evl)
   ret <32 x i8> %u
 }
@@ -521,10 +461,8 @@ define <64 x i8> @vnmsac_vv_nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> %allones, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> splat (i1 -1), i32 %evl)
   %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> %m, <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
@@ -536,11 +474,9 @@ define <64 x i8> @vnmsac_vv_nxv64i8_unmasked(<64 x i8> %a, <64 x i8> %b, <64 x i
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> %allones, i32 %evl)
-  %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> %allones, <64 x i8> %y, <64 x i8> %c, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> splat (i1 -1), <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
 
@@ -553,10 +489,8 @@ define <64 x i8> @vnmsac_vx_nxv64i8(<64 x i8> %a, i8 %b, <64 x i8> %c,  <64 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <64 x i8> %elt.head, <64 x i8> poison, <64 x i32> zeroinitializer
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> %allones, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> splat (i1 -1), i32 %evl)
   %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> %m, <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
@@ -570,11 +504,9 @@ define <64 x i8> @vnmsac_vx_nxv64i8_unmasked(<64 x i8> %a, i8 %b, <64 x i8> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <64 x i8> %elt.head, <64 x i8> poison, <64 x i32> zeroinitializer
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> %allones, i32 %evl)
-  %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> %allones, <64 x i8> %y, <64 x i8> %c, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <64 x i8> @llvm.vp.merge.nxv64i8(<64 x i1> splat (i1 -1), <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
 
@@ -585,10 +517,8 @@ define <64 x i8> @vnmsac_vv_nxv64i8_ta(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c,
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> %allones, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %b, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> splat (i1 -1), i32 %evl)
   %u = call <64 x i8> @llvm.vp.select.nxv64i8(<64 x i1> %m, <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
@@ -602,10 +532,8 @@ define <64 x i8> @vnmsac_vx_nxv64i8_ta(<64 x i8> %a, i8 %b, <64 x i8> %c,  <64 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <64 x i8> %elt.head, <64 x i8> poison, <64 x i32> zeroinitializer
-  %splat = insertelement <64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <64 x i1> %splat, <64 x i1> poison, <64 x i32> zeroinitializer
-  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> %allones, i32 %evl)
-  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> %allones, i32 %evl)
+  %x = call <64 x i8> @llvm.vp.mul.nxv64i8(<64 x i8> %a, <64 x i8> %vb, <64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <64 x i8> @llvm.vp.sub.nxv64i8(<64 x i8> %c, <64 x i8> %x, <64 x i1> splat (i1 -1), i32 %evl)
   %u = call <64 x i8> @llvm.vp.select.nxv64i8(<64 x i1> %m, <64 x i8> %y, <64 x i8> %c, i32 %evl)
   ret <64 x i8> %u
 }
@@ -622,10 +550,8 @@ define <2 x i16> @vnmsac_vv_nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> %m, <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
@@ -637,11 +563,9 @@ define <2 x i16> @vnmsac_vv_nxv2i16_unmasked(<2 x i16> %a, <2 x i16> %b, <2 x i1
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> %allones, <2 x i16> %y, <2 x i16> %c, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> splat (i1 -1), <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
 
@@ -654,10 +578,8 @@ define <2 x i16> @vnmsac_vx_nxv2i16(<2 x i16> %a, i16 %b, <2 x i16> %c,  <2 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> %m, <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
@@ -671,11 +593,9 @@ define <2 x i16> @vnmsac_vx_nxv2i16_unmasked(<2 x i16> %a, i16 %b, <2 x i16> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> %allones, <2 x i16> %y, <2 x i16> %c, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i16> @llvm.vp.merge.nxv2i16(<2 x i1> splat (i1 -1), <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
 
@@ -686,10 +606,8 @@ define <2 x i16> @vnmsac_vv_nxv2i16_ta(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c,
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i16> @llvm.vp.select.nxv2i16(<2 x i1> %m, <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
@@ -703,10 +621,8 @@ define <2 x i16> @vnmsac_vx_nxv2i16_ta(<2 x i16> %a, i16 %b, <2 x i16> %c,  <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i16> @llvm.vp.mul.nxv2i16(<2 x i16> %a, <2 x i16> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i16> @llvm.vp.sub.nxv2i16(<2 x i16> %c, <2 x i16> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i16> @llvm.vp.select.nxv2i16(<2 x i1> %m, <2 x i16> %y, <2 x i16> %c, i32 %evl)
   ret <2 x i16> %u
 }
@@ -723,10 +639,8 @@ define <4 x i16> @vnmsac_vv_nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> %m, <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
@@ -738,11 +652,9 @@ define <4 x i16> @vnmsac_vv_nxv4i16_unmasked(<4 x i16> %a, <4 x i16> %b, <4 x i1
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> %allones, <4 x i16> %y, <4 x i16> %c, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> splat (i1 -1), <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
 
@@ -755,10 +667,8 @@ define <4 x i16> @vnmsac_vx_nxv4i16(<4 x i16> %a, i16 %b, <4 x i16> %c,  <4 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> %m, <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
@@ -772,11 +682,9 @@ define <4 x i16> @vnmsac_vx_nxv4i16_unmasked(<4 x i16> %a, i16 %b, <4 x i16> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> %allones, <4 x i16> %y, <4 x i16> %c, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i16> @llvm.vp.merge.nxv4i16(<4 x i1> splat (i1 -1), <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
 
@@ -787,10 +695,8 @@ define <4 x i16> @vnmsac_vv_nxv4i16_ta(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c,
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i16> @llvm.vp.select.nxv4i16(<4 x i1> %m, <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
@@ -804,10 +710,8 @@ define <4 x i16> @vnmsac_vx_nxv4i16_ta(<4 x i16> %a, i16 %b, <4 x i16> %c,  <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i16> @llvm.vp.mul.nxv4i16(<4 x i16> %a, <4 x i16> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i16> @llvm.vp.sub.nxv4i16(<4 x i16> %c, <4 x i16> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i16> @llvm.vp.select.nxv4i16(<4 x i1> %m, <4 x i16> %y, <4 x i16> %c, i32 %evl)
   ret <4 x i16> %u
 }
@@ -824,10 +728,8 @@ define <8 x i16> @vnmsac_vv_nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> %m, <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
@@ -839,11 +741,9 @@ define <8 x i16> @vnmsac_vv_nxv8i16_unmasked(<8 x i16> %a, <8 x i16> %b, <8 x i1
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> %allones, <8 x i16> %y, <8 x i16> %c, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> splat (i1 -1), <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
 
@@ -856,10 +756,8 @@ define <8 x i16> @vnmsac_vx_nxv8i16(<8 x i16> %a, i16 %b, <8 x i16> %c,  <8 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> %m, <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
@@ -873,11 +771,9 @@ define <8 x i16> @vnmsac_vx_nxv8i16_unmasked(<8 x i16> %a, i16 %b, <8 x i16> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> %allones, <8 x i16> %y, <8 x i16> %c, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i16> @llvm.vp.merge.nxv8i16(<8 x i1> splat (i1 -1), <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
 
@@ -888,10 +784,8 @@ define <8 x i16> @vnmsac_vv_nxv8i16_ta(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c,
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i16> @llvm.vp.select.nxv8i16(<8 x i1> %m, <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
@@ -905,10 +799,8 @@ define <8 x i16> @vnmsac_vx_nxv8i16_ta(<8 x i16> %a, i16 %b, <8 x i16> %c,  <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i16> @llvm.vp.mul.nxv8i16(<8 x i16> %a, <8 x i16> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i16> @llvm.vp.sub.nxv8i16(<8 x i16> %c, <8 x i16> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i16> @llvm.vp.select.nxv8i16(<8 x i1> %m, <8 x i16> %y, <8 x i16> %c, i32 %evl)
   ret <8 x i16> %u
 }
@@ -925,10 +817,8 @@ define <16 x i16> @vnmsac_vv_nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> %m, <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
@@ -940,11 +830,9 @@ define <16 x i16> @vnmsac_vv_nxv16i16_unmasked(<16 x i16> %a, <16 x i16> %b, <16
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> %allones, <16 x i16> %y, <16 x i16> %c, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> splat (i1 -1), <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
 
@@ -957,10 +845,8 @@ define <16 x i16> @vnmsac_vx_nxv16i16(<16 x i16> %a, i16 %b, <16 x i16> %c,  <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> %m, <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
@@ -974,11 +860,9 @@ define <16 x i16> @vnmsac_vx_nxv16i16_unmasked(<16 x i16> %a, i16 %b, <16 x i16>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> %allones, <16 x i16> %y, <16 x i16> %c, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i16> @llvm.vp.merge.nxv16i16(<16 x i1> splat (i1 -1), <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
 
@@ -989,10 +873,8 @@ define <16 x i16> @vnmsac_vv_nxv16i16_ta(<16 x i16> %a, <16 x i16> %b, <16 x i16
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i16> @llvm.vp.select.nxv16i16(<16 x i1> %m, <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
@@ -1006,10 +888,8 @@ define <16 x i16> @vnmsac_vx_nxv16i16_ta(<16 x i16> %a, i16 %b, <16 x i16> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i16> @llvm.vp.mul.nxv16i16(<16 x i16> %a, <16 x i16> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i16> @llvm.vp.sub.nxv16i16(<16 x i16> %c, <16 x i16> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i16> @llvm.vp.select.nxv16i16(<16 x i1> %m, <16 x i16> %y, <16 x i16> %c, i32 %evl)
   ret <16 x i16> %u
 }
@@ -1026,10 +906,8 @@ define <32 x i16> @vnmsac_vv_nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> %m, <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
@@ -1041,11 +919,9 @@ define <32 x i16> @vnmsac_vv_nxv32i16_unmasked(<32 x i16> %a, <32 x i16> %b, <32
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> %allones, <32 x i16> %y, <32 x i16> %c, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> splat (i1 -1), <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
 
@@ -1058,10 +934,8 @@ define <32 x i16> @vnmsac_vx_nxv32i16(<32 x i16> %a, i16 %b, <32 x i16> %c,  <32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <32 x i16> %elt.head, <32 x i16> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> %m, <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
@@ -1075,11 +949,9 @@ define <32 x i16> @vnmsac_vx_nxv32i16_unmasked(<32 x i16> %a, i16 %b, <32 x i16>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <32 x i16> %elt.head, <32 x i16> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> %allones, i32 %evl)
-  %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> %allones, <32 x i16> %y, <32 x i16> %c, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <32 x i16> @llvm.vp.merge.nxv32i16(<32 x i1> splat (i1 -1), <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
 
@@ -1090,10 +962,8 @@ define <32 x i16> @vnmsac_vv_nxv32i16_ta(<32 x i16> %a, <32 x i16> %b, <32 x i16
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %b, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i16> @llvm.vp.select.nxv32i16(<32 x i1> %m, <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
@@ -1107,10 +977,8 @@ define <32 x i16> @vnmsac_vx_nxv32i16_ta(<32 x i16> %a, i16 %b, <32 x i16> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <32 x i16> %elt.head, <32 x i16> poison, <32 x i32> zeroinitializer
-  %splat = insertelement <32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <32 x i1> %splat, <32 x i1> poison, <32 x i32> zeroinitializer
-  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> %allones, i32 %evl)
-  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> %allones, i32 %evl)
+  %x = call <32 x i16> @llvm.vp.mul.nxv32i16(<32 x i16> %a, <32 x i16> %vb, <32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <32 x i16> @llvm.vp.sub.nxv32i16(<32 x i16> %c, <32 x i16> %x, <32 x i1> splat (i1 -1), i32 %evl)
   %u = call <32 x i16> @llvm.vp.select.nxv32i16(<32 x i1> %m, <32 x i16> %y, <32 x i16> %c, i32 %evl)
   ret <32 x i16> %u
 }
@@ -1127,10 +995,8 @@ define <2 x i32> @vnmsac_vv_nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> %m, <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
@@ -1142,11 +1008,9 @@ define <2 x i32> @vnmsac_vv_nxv2i32_unmasked(<2 x i32> %a, <2 x i32> %b, <2 x i3
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> %allones, <2 x i32> %y, <2 x i32> %c, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> splat (i1 -1), <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
 
@@ -1159,10 +1023,8 @@ define <2 x i32> @vnmsac_vx_nxv2i32(<2 x i32> %a, i32 %b, <2 x i32> %c,  <2 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> %m, <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
@@ -1176,11 +1038,9 @@ define <2 x i32> @vnmsac_vx_nxv2i32_unmasked(<2 x i32> %a, i32 %b, <2 x i32> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> %allones, <2 x i32> %y, <2 x i32> %c, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i32> @llvm.vp.merge.nxv2i32(<2 x i1> splat (i1 -1), <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
 
@@ -1191,10 +1051,8 @@ define <2 x i32> @vnmsac_vv_nxv2i32_ta(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c,
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i32> @llvm.vp.select.nxv2i32(<2 x i1> %m, <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
@@ -1208,10 +1066,8 @@ define <2 x i32> @vnmsac_vx_nxv2i32_ta(<2 x i32> %a, i32 %b, <2 x i32> %c,  <2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i32> @llvm.vp.mul.nxv2i32(<2 x i32> %a, <2 x i32> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i32> @llvm.vp.sub.nxv2i32(<2 x i32> %c, <2 x i32> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i32> @llvm.vp.select.nxv2i32(<2 x i1> %m, <2 x i32> %y, <2 x i32> %c, i32 %evl)
   ret <2 x i32> %u
 }
@@ -1228,10 +1084,8 @@ define <4 x i32> @vnmsac_vv_nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> %m, <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
@@ -1243,11 +1097,9 @@ define <4 x i32> @vnmsac_vv_nxv4i32_unmasked(<4 x i32> %a, <4 x i32> %b, <4 x i3
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> %allones, <4 x i32> %y, <4 x i32> %c, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> splat (i1 -1), <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
 
@@ -1260,10 +1112,8 @@ define <4 x i32> @vnmsac_vx_nxv4i32(<4 x i32> %a, i32 %b, <4 x i32> %c,  <4 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> %m, <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
@@ -1277,11 +1127,9 @@ define <4 x i32> @vnmsac_vx_nxv4i32_unmasked(<4 x i32> %a, i32 %b, <4 x i32> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> %allones, <4 x i32> %y, <4 x i32> %c, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i32> @llvm.vp.merge.nxv4i32(<4 x i1> splat (i1 -1), <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
 
@@ -1292,10 +1140,8 @@ define <4 x i32> @vnmsac_vv_nxv4i32_ta(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c,
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i32> @llvm.vp.select.nxv4i32(<4 x i1> %m, <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
@@ -1309,10 +1155,8 @@ define <4 x i32> @vnmsac_vx_nxv4i32_ta(<4 x i32> %a, i32 %b, <4 x i32> %c,  <4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i32> @llvm.vp.mul.nxv4i32(<4 x i32> %a, <4 x i32> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i32> @llvm.vp.sub.nxv4i32(<4 x i32> %c, <4 x i32> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i32> @llvm.vp.select.nxv4i32(<4 x i1> %m, <4 x i32> %y, <4 x i32> %c, i32 %evl)
   ret <4 x i32> %u
 }
@@ -1329,10 +1173,8 @@ define <8 x i32> @vnmsac_vv_nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> %m, <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
@@ -1344,11 +1186,9 @@ define <8 x i32> @vnmsac_vv_nxv8i32_unmasked(<8 x i32> %a, <8 x i32> %b, <8 x i3
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> %allones, <8 x i32> %y, <8 x i32> %c, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> splat (i1 -1), <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
 
@@ -1361,10 +1201,8 @@ define <8 x i32> @vnmsac_vx_nxv8i32(<8 x i32> %a, i32 %b, <8 x i32> %c,  <8 x i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> %m, <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
@@ -1378,11 +1216,9 @@ define <8 x i32> @vnmsac_vx_nxv8i32_unmasked(<8 x i32> %a, i32 %b, <8 x i32> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> %allones, <8 x i32> %y, <8 x i32> %c, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i32> @llvm.vp.merge.nxv8i32(<8 x i1> splat (i1 -1), <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
 
@@ -1393,10 +1229,8 @@ define <8 x i32> @vnmsac_vv_nxv8i32_ta(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c,
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i32> @llvm.vp.select.nxv8i32(<8 x i1> %m, <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
@@ -1410,10 +1244,8 @@ define <8 x i32> @vnmsac_vx_nxv8i32_ta(<8 x i32> %a, i32 %b, <8 x i32> %c,  <8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i32> @llvm.vp.mul.nxv8i32(<8 x i32> %a, <8 x i32> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i32> @llvm.vp.sub.nxv8i32(<8 x i32> %c, <8 x i32> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i32> @llvm.vp.select.nxv8i32(<8 x i1> %m, <8 x i32> %y, <8 x i32> %c, i32 %evl)
   ret <8 x i32> %u
 }
@@ -1430,10 +1262,8 @@ define <16 x i32> @vnmsac_vv_nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> %m, <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
@@ -1445,11 +1275,9 @@ define <16 x i32> @vnmsac_vv_nxv16i32_unmasked(<16 x i32> %a, <16 x i32> %b, <16
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> %allones, <16 x i32> %y, <16 x i32> %c, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> splat (i1 -1), <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
 
@@ -1462,10 +1290,8 @@ define <16 x i32> @vnmsac_vx_nxv16i32(<16 x i32> %a, i32 %b, <16 x i32> %c,  <16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> %m, <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
@@ -1479,11 +1305,9 @@ define <16 x i32> @vnmsac_vx_nxv16i32_unmasked(<16 x i32> %a, i32 %b, <16 x i32>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> %allones, i32 %evl)
-  %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> %allones, <16 x i32> %y, <16 x i32> %c, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <16 x i32> @llvm.vp.merge.nxv16i32(<16 x i1> splat (i1 -1), <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
 
@@ -1494,10 +1318,8 @@ define <16 x i32> @vnmsac_vv_nxv16i32_ta(<16 x i32> %a, <16 x i32> %b, <16 x i32
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %b, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i32> @llvm.vp.select.nxv16i32(<16 x i1> %m, <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
@@ -1511,10 +1333,8 @@ define <16 x i32> @vnmsac_vx_nxv16i32_ta(<16 x i32> %a, i32 %b, <16 x i32> %c,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %splat = insertelement <16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <16 x i1> %splat, <16 x i1> poison, <16 x i32> zeroinitializer
-  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> %allones, i32 %evl)
-  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> %allones, i32 %evl)
+  %x = call <16 x i32> @llvm.vp.mul.nxv16i32(<16 x i32> %a, <16 x i32> %vb, <16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <16 x i32> @llvm.vp.sub.nxv16i32(<16 x i32> %c, <16 x i32> %x, <16 x i1> splat (i1 -1), i32 %evl)
   %u = call <16 x i32> @llvm.vp.select.nxv16i32(<16 x i1> %m, <16 x i32> %y, <16 x i32> %c, i32 %evl)
   ret <16 x i32> %u
 }
@@ -1531,10 +1351,8 @@ define <2 x i64> @vnmsac_vv_nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> %m, <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
@@ -1546,11 +1364,9 @@ define <2 x i64> @vnmsac_vv_nxv2i64_unmasked(<2 x i64> %a, <2 x i64> %b, <2 x i6
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> %allones, <2 x i64> %y, <2 x i64> %c, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> splat (i1 -1), <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
 
@@ -1578,10 +1394,8 @@ define <2 x i64> @vnmsac_vx_nxv2i64(<2 x i64> %a, i64 %b, <2 x i64> %c,  <2 x i1
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> %m, <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
@@ -1610,11 +1424,9 @@ define <2 x i64> @vnmsac_vx_nxv2i64_unmasked(<2 x i64> %a, i64 %b, <2 x i64> %c,
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> %allones, i32 %evl)
-  %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> %allones, <2 x i64> %y, <2 x i64> %c, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <2 x i64> @llvm.vp.merge.nxv2i64(<2 x i1> splat (i1 -1), <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
 
@@ -1625,10 +1437,8 @@ define <2 x i64> @vnmsac_vv_nxv2i64_ta(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c,
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %b, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i64> @llvm.vp.select.nxv2i64(<2 x i1> %m, <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
@@ -1657,10 +1467,8 @@ define <2 x i64> @vnmsac_vx_nxv2i64_ta(<2 x i64> %a, i64 %b, <2 x i64> %c,  <2 x
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %splat = insertelement <2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <2 x i1> %splat, <2 x i1> poison, <2 x i32> zeroinitializer
-  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> %allones, i32 %evl)
-  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> %allones, i32 %evl)
+  %x = call <2 x i64> @llvm.vp.mul.nxv2i64(<2 x i64> %a, <2 x i64> %vb, <2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <2 x i64> @llvm.vp.sub.nxv2i64(<2 x i64> %c, <2 x i64> %x, <2 x i1> splat (i1 -1), i32 %evl)
   %u = call <2 x i64> @llvm.vp.select.nxv2i64(<2 x i1> %m, <2 x i64> %y, <2 x i64> %c, i32 %evl)
   ret <2 x i64> %u
 }
@@ -1677,10 +1485,8 @@ define <4 x i64> @vnmsac_vv_nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> %m, <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
@@ -1692,11 +1498,9 @@ define <4 x i64> @vnmsac_vv_nxv4i64_unmasked(<4 x i64> %a, <4 x i64> %b, <4 x i6
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> %allones, <4 x i64> %y, <4 x i64> %c, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> splat (i1 -1), <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
 
@@ -1724,10 +1528,8 @@ define <4 x i64> @vnmsac_vx_nxv4i64(<4 x i64> %a, i64 %b, <4 x i64> %c,  <4 x i1
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> %m, <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
@@ -1756,11 +1558,9 @@ define <4 x i64> @vnmsac_vx_nxv4i64_unmasked(<4 x i64> %a, i64 %b, <4 x i64> %c,
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> %allones, i32 %evl)
-  %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> %allones, <4 x i64> %y, <4 x i64> %c, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <4 x i64> @llvm.vp.merge.nxv4i64(<4 x i1> splat (i1 -1), <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
 
@@ -1771,10 +1571,8 @@ define <4 x i64> @vnmsac_vv_nxv4i64_ta(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c,
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %b, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i64> @llvm.vp.select.nxv4i64(<4 x i1> %m, <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
@@ -1803,10 +1601,8 @@ define <4 x i64> @vnmsac_vx_nxv4i64_ta(<4 x i64> %a, i64 %b, <4 x i64> %c,  <4 x
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %splat = insertelement <4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <4 x i1> %splat, <4 x i1> poison, <4 x i32> zeroinitializer
-  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> %allones, i32 %evl)
-  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> %allones, i32 %evl)
+  %x = call <4 x i64> @llvm.vp.mul.nxv4i64(<4 x i64> %a, <4 x i64> %vb, <4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <4 x i64> @llvm.vp.sub.nxv4i64(<4 x i64> %c, <4 x i64> %x, <4 x i1> splat (i1 -1), i32 %evl)
   %u = call <4 x i64> @llvm.vp.select.nxv4i64(<4 x i1> %m, <4 x i64> %y, <4 x i64> %c, i32 %evl)
   ret <4 x i64> %u
 }
@@ -1823,10 +1619,8 @@ define <8 x i64> @vnmsac_vv_nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c,  <
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> %m, <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
@@ -1838,11 +1632,9 @@ define <8 x i64> @vnmsac_vv_nxv8i64_unmasked(<8 x i64> %a, <8 x i64> %b, <8 x i6
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> %allones, <8 x i64> %y, <8 x i64> %c, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> splat (i1 -1), <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
 
@@ -1870,10 +1662,8 @@ define <8 x i64> @vnmsac_vx_nxv8i64(<8 x i64> %a, i64 %b, <8 x i64> %c,  <8 x i1
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> %m, <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
@@ -1902,11 +1692,9 @@ define <8 x i64> @vnmsac_vx_nxv8i64_unmasked(<8 x i64> %a, i64 %b, <8 x i64> %c,
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> %allones, i32 %evl)
-  %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> %allones, <8 x i64> %y, <8 x i64> %c, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <8 x i64> @llvm.vp.merge.nxv8i64(<8 x i1> splat (i1 -1), <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
 
@@ -1917,10 +1705,8 @@ define <8 x i64> @vnmsac_vv_nxv8i64_ta(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c,
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %b, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i64> @llvm.vp.select.nxv8i64(<8 x i1> %m, <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
@@ -1949,10 +1735,8 @@ define <8 x i64> @vnmsac_vx_nxv8i64_ta(<8 x i64> %a, i64 %b, <8 x i64> %c,  <8 x
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %splat = insertelement <8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
-  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> %allones, i32 %evl)
-  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> %allones, i32 %evl)
+  %x = call <8 x i64> @llvm.vp.mul.nxv8i64(<8 x i64> %a, <8 x i64> %vb, <8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <8 x i64> @llvm.vp.sub.nxv8i64(<8 x i64> %c, <8 x i64> %x, <8 x i1> splat (i1 -1), i32 %evl)
   %u = call <8 x i64> @llvm.vp.select.nxv8i64(<8 x i1> %m, <8 x i64> %y, <8 x i64> %c, i32 %evl)
   ret <8 x i64> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
index d132608cdf7f59..09c281b525a643 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
@@ -34,9 +34,7 @@ define <2 x i8> @vor_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -60,9 +58,7 @@ define <2 x i8> @vor_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -72,9 +68,7 @@ define <2 x i8> @vor_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -84,11 +78,7 @@ define <2 x i8> @vor_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -110,9 +100,7 @@ define <4 x i8> @vor_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -148,9 +136,7 @@ define <4 x i8> @vor_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -160,9 +146,7 @@ define <4 x i8> @vor_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -172,11 +156,7 @@ define <4 x i8> @vor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -198,9 +178,7 @@ define <7 x i8> @vor_vv_v5i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <7 x i1> %head, <7 x i1> poison, <7 x i32> zeroinitializer
-  %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
+  %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> splat (i1 true), i32 %evl)
   ret <7 x i8> %v
 }
 
@@ -224,9 +202,7 @@ define <7 x i8> @vor_vx_v5i8_unmasked(<7 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <7 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <7 x i8> %elt.head, <7 x i8> poison, <7 x i32> zeroinitializer
-  %head = insertelement <7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <7 x i1> %head, <7 x i1> poison, <7 x i32> zeroinitializer
-  %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+  %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> splat (i1 true), i32 %evl)
   ret <7 x i8> %v
 }
 
@@ -236,9 +212,7 @@ define <7 x i8> @vor_vi_v5i8(<7 x i8> %va, <7 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <7 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <7 x i8> %elt.head, <7 x i8> poison, <7 x i32> zeroinitializer
-  %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+  %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> splat (i8 5), <7 x i1> %m, i32 %evl)
   ret <7 x i8> %v
 }
 
@@ -248,11 +222,7 @@ define <7 x i8> @vor_vi_v5i8_unmasked(<7 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <7 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <7 x i8> %elt.head, <7 x i8> poison, <7 x i32> zeroinitializer
-  %head = insertelement <7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <7 x i1> %head, <7 x i1> poison, <7 x i32> zeroinitializer
-  %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+  %v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> splat (i8 5), <7 x i1> splat (i1 true), i32 %evl)
   ret <7 x i8> %v
 }
 
@@ -274,9 +244,7 @@ define <8 x i8> @vor_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -300,9 +268,7 @@ define <8 x i8> @vor_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -312,9 +278,7 @@ define <8 x i8> @vor_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -324,11 +288,7 @@ define <8 x i8> @vor_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -350,9 +310,7 @@ define <16 x i8> @vor_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -376,9 +334,7 @@ define <16 x i8> @vor_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -388,9 +344,7 @@ define <16 x i8> @vor_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -400,11 +354,7 @@ define <16 x i8> @vor_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -426,9 +376,7 @@ define <2 x i16> @vor_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -452,9 +400,7 @@ define <2 x i16> @vor_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -464,9 +410,7 @@ define <2 x i16> @vor_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -476,11 +420,7 @@ define <2 x i16> @vor_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -502,9 +442,7 @@ define <4 x i16> @vor_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -528,9 +466,7 @@ define <4 x i16> @vor_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -540,9 +476,7 @@ define <4 x i16> @vor_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -552,11 +486,7 @@ define <4 x i16> @vor_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -578,9 +508,7 @@ define <8 x i16> @vor_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -604,9 +532,7 @@ define <8 x i16> @vor_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -616,9 +542,7 @@ define <8 x i16> @vor_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -628,11 +552,7 @@ define <8 x i16> @vor_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -654,9 +574,7 @@ define <16 x i16> @vor_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -680,9 +598,7 @@ define <16 x i16> @vor_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -692,9 +608,7 @@ define <16 x i16> @vor_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -704,11 +618,7 @@ define <16 x i16> @vor_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -730,9 +640,7 @@ define <2 x i32> @vor_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -756,9 +664,7 @@ define <2 x i32> @vor_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -768,9 +674,7 @@ define <2 x i32> @vor_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -780,11 +684,7 @@ define <2 x i32> @vor_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -806,9 +706,7 @@ define <4 x i32> @vor_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -832,9 +730,7 @@ define <4 x i32> @vor_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -844,9 +740,7 @@ define <4 x i32> @vor_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -856,11 +750,7 @@ define <4 x i32> @vor_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -882,9 +772,7 @@ define <8 x i32> @vor_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -908,9 +796,7 @@ define <8 x i32> @vor_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -920,9 +806,7 @@ define <8 x i32> @vor_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -932,11 +816,7 @@ define <8 x i32> @vor_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -958,9 +838,7 @@ define <16 x i32> @vor_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -984,9 +862,7 @@ define <16 x i32> @vor_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -996,9 +872,7 @@ define <16 x i32> @vor_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1008,11 +882,7 @@ define <16 x i32> @vor_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1034,9 +904,7 @@ define <2 x i64> @vor_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1088,9 +956,7 @@ define <2 x i64> @vor_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl)
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1100,9 +966,7 @@ define <2 x i64> @vor_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1112,11 +976,7 @@ define <2 x i64> @vor_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1138,9 +998,7 @@ define <4 x i64> @vor_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1192,9 +1050,7 @@ define <4 x i64> @vor_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl)
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1204,9 +1060,7 @@ define <4 x i64> @vor_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1216,11 +1070,7 @@ define <4 x i64> @vor_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1242,9 +1092,7 @@ define <8 x i64> @vor_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1296,9 +1144,7 @@ define <8 x i64> @vor_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl)
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1308,9 +1154,7 @@ define <8 x i64> @vor_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1320,11 +1164,7 @@ define <8 x i64> @vor_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1346,9 +1186,7 @@ define <16 x i64> @vor_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1400,9 +1238,7 @@ define <16 x i64> @vor_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1412,9 +1248,7 @@ define <16 x i64> @vor_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1424,10 +1258,6 @@ define <16 x i64> @vor_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index 4d2f55b172e48d..a13f1eed8efb1d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -184,9 +184,7 @@ define <3 x i8> @vpgather_truemask_v3i8(<3 x ptr> %ptrs, i32 zeroext %evl) {
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <3 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <3 x i1> %mhead, <3 x i1> poison, <3 x i32> zeroinitializer
-  %v = call <3 x i8> @llvm.vp.gather.v3i8.v3p0(<3 x ptr> %ptrs, <3 x i1> %mtrue, i32 %evl)
+  %v = call <3 x i8> @llvm.vp.gather.v3i8.v3p0(<3 x ptr> %ptrs, <3 x i1> splat (i1 1), i32 %evl)
   ret <3 x i8> %v
 }
 
@@ -224,9 +222,7 @@ define <4 x i8> @vpgather_truemask_v4i8(<4 x ptr> %ptrs, i32 zeroext %evl) {
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.gather.v4i8.v4p0(<4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.gather.v4i8.v4p0(<4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -455,9 +451,7 @@ define <4 x i16> @vpgather_truemask_v4i16(<4 x ptr> %ptrs, i32 zeroext %evl) {
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.gather.v4i16.v4p0(<4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.gather.v4i16.v4p0(<4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -665,9 +659,7 @@ define <4 x i32> @vpgather_truemask_v4i32(<4 x ptr> %ptrs, i32 zeroext %evl) {
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -905,9 +897,7 @@ define <4 x i64> @vpgather_truemask_v4i64(<4 x ptr> %ptrs, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; RV64-NEXT:    vluxei64.v v8, (zero), v8
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0(<4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0(<4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1216,9 +1206,7 @@ define <4 x half> @vpgather_truemask_v4f16(<4 x ptr> %ptrs, i32 zeroext %evl) {
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x half> @llvm.vp.gather.v4f16.v4p0(<4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  %v = call <4 x half> @llvm.vp.gather.v4f16.v4p0(<4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x half> %v
 }
 
@@ -1384,9 +1372,7 @@ define <4 x float> @vpgather_truemask_v4f32(<4 x ptr> %ptrs, i32 zeroext %evl) {
 ; RV64-NEXT:    vluxei64.v v10, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x float> @llvm.vp.gather.v4f32.v4p0(<4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  %v = call <4 x float> @llvm.vp.gather.v4f32.v4p0(<4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x float> %v
 }
 
@@ -1624,9 +1610,7 @@ define <4 x double> @vpgather_truemask_v4f64(<4 x ptr> %ptrs, i32 zeroext %evl)
 ; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; RV64-NEXT:    vluxei64.v v8, (zero), v8
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x double> @llvm.vp.gather.v4f64.v4p0(<4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  %v = call <4 x double> @llvm.vp.gather.v4f64.v4p0(<4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x double> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index 618b875be56651..9ef89352e65e55 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -46,9 +46,7 @@ define <4 x i8> @vpload_v4i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x i8> @llvm.vp.load.v4i8.p0(ptr %ptr, <4 x i1> %b, i32 %evl)
+  %load = call <4 x i8> @llvm.vp.load.v4i8.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %load
 }
 
@@ -106,9 +104,7 @@ define <8 x i16> @vpload_v8i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x i16> @llvm.vp.load.v8i16.p0(ptr %ptr, <8 x i1> %b, i32 %evl)
+  %load = call <8 x i16> @llvm.vp.load.v8i16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %load
 }
 
@@ -154,9 +150,7 @@ define <6 x i32> @vpload_v6i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <6 x i1> poison, i1 true, i32 0
-  %b = shufflevector <6 x i1> %a, <6 x i1> poison, <6 x i32> zeroinitializer
-  %load = call <6 x i32> @llvm.vp.load.v6i32.p0(ptr %ptr, <6 x i1> %b, i32 %evl)
+  %load = call <6 x i32> @llvm.vp.load.v6i32.p0(ptr %ptr, <6 x i1> splat (i1 true), i32 %evl)
   ret <6 x i32> %load
 }
 
@@ -178,9 +172,7 @@ define <8 x i32> @vpload_v8i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> %b, i32 %evl)
+  %load = call <8 x i32> @llvm.vp.load.v8i32.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %load
 }
 
@@ -214,9 +206,7 @@ define <4 x i64> @vpload_v4i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x i64> @llvm.vp.load.v4i64.p0(ptr %ptr, <4 x i1> %b, i32 %evl)
+  %load = call <4 x i64> @llvm.vp.load.v4i64.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %load
 }
 
@@ -250,9 +240,7 @@ define <2 x half> @vpload_v2f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x i1> poison, i1 true, i32 0
-  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
-  %load = call <2 x half> @llvm.vp.load.v2f16.p0(ptr %ptr, <2 x i1> %b, i32 %evl)
+  %load = call <2 x half> @llvm.vp.load.v2f16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x half> %load
 }
 
@@ -322,9 +310,7 @@ define <8 x float> @vpload_v8f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x float> @llvm.vp.load.v8f32.p0(ptr %ptr, <8 x i1> %b, i32 %evl)
+  %load = call <8 x float> @llvm.vp.load.v8f32.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x float> %load
 }
 
@@ -358,9 +344,7 @@ define <4 x double> @vpload_v4f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x double> @llvm.vp.load.v4f64.p0(ptr %ptr, <4 x i1> %b, i32 %evl)
+  %load = call <4 x double> @llvm.vp.load.v4f64.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x double> %load
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index 6fe83fed6fd9c2..466448a7a05a21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -89,9 +89,7 @@ define <2 x i8> @vpmerge_vi_v2i8(<2 x i8> %vb, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 2, i32 0
-  %va = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.merge.v2i8(<2 x i1> %m, <2 x i8> %va, <2 x i8> %vb, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.merge.v2i8(<2 x i1> %m, <2 x i8> splat (i8 2), <2 x i8> %vb, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -126,9 +124,7 @@ define <4 x i8> @vpmerge_vi_v4i8(<4 x i8> %vb, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 2, i32 0
-  %va = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.merge.v4i8(<4 x i1> %m, <4 x i8> %va, <4 x i8> %vb, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.merge.v4i8(<4 x i1> %m, <4 x i8> splat (i8 2), <4 x i8> %vb, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -163,9 +159,7 @@ define <6 x i8> @vpmerge_vi_v6i8(<6 x i8> %vb, <6 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <6 x i8> poison, i8 2, i32 0
-  %va = shufflevector <6 x i8> %elt.head, <6 x i8> poison, <6 x i32> zeroinitializer
-  %v = call <6 x i8> @llvm.vp.merge.v6i8(<6 x i1> %m, <6 x i8> %va, <6 x i8> %vb, i32 %evl)
+  %v = call <6 x i8> @llvm.vp.merge.v6i8(<6 x i1> %m, <6 x i8> splat (i8 2), <6 x i8> %vb, i32 %evl)
   ret <6 x i8> %v
 }
 
@@ -200,9 +194,7 @@ define <8 x i7> @vpmerge_vi_v8i7(<8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i7> poison, i7 2, i32 0
-  %va = shufflevector <8 x i7> %elt.head, <8 x i7> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i7> @llvm.vp.merge.v8i7(<8 x i1> %m, <8 x i7> %va, <8 x i7> %vb, i32 %evl)
+  %v = call <8 x i7> @llvm.vp.merge.v8i7(<8 x i1> %m, <8 x i7> splat (i7 2), <8 x i7> %vb, i32 %evl)
   ret <8 x i7> %v
 }
 
@@ -237,9 +229,7 @@ define <8 x i8> @vpmerge_vi_v8i8(<8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 2, i32 0
-  %va = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.merge.v8i8(<8 x i1> %m, <8 x i8> %va, <8 x i8> %vb, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.merge.v8i8(<8 x i1> %m, <8 x i8> splat (i8 2), <8 x i8> %vb, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -274,9 +264,7 @@ define <16 x i8> @vpmerge_vi_v16i8(<16 x i8> %vb, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 2, i32 0
-  %va = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.merge.v16i8(<16 x i1> %m, <16 x i8> %va, <16 x i8> %vb, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.merge.v16i8(<16 x i1> %m, <16 x i8> splat (i8 2), <16 x i8> %vb, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -311,9 +299,7 @@ define <2 x i16> @vpmerge_vi_v2i16(<2 x i16> %vb, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 2, i32 0
-  %va = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.merge.v2i16(<2 x i1> %m, <2 x i16> %va, <2 x i16> %vb, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.merge.v2i16(<2 x i1> %m, <2 x i16> splat (i16 2), <2 x i16> %vb, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -348,9 +334,7 @@ define <4 x i16> @vpmerge_vi_v4i16(<4 x i16> %vb, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 2, i32 0
-  %va = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.merge.v4i16(<4 x i1> %m, <4 x i16> %va, <4 x i16> %vb, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.merge.v4i16(<4 x i1> %m, <4 x i16> splat (i16 2), <4 x i16> %vb, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -385,9 +369,7 @@ define <8 x i16> @vpmerge_vi_v8i16(<8 x i16> %vb, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 2, i32 0
-  %va = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.merge.v8i16(<8 x i1> %m, <8 x i16> %va, <8 x i16> %vb, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.merge.v8i16(<8 x i1> %m, <8 x i16> splat (i16 2), <8 x i16> %vb, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -422,9 +404,7 @@ define <16 x i16> @vpmerge_vi_v16i16(<16 x i16> %vb, <16 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 2, i32 0
-  %va = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.merge.v16i16(<16 x i1> %m, <16 x i16> %va, <16 x i16> %vb, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.merge.v16i16(<16 x i1> %m, <16 x i16> splat (i16 2), <16 x i16> %vb, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -459,9 +439,7 @@ define <2 x i32> @vpmerge_vi_v2i32(<2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 2, i32 0
-  %va = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.merge.v2i32(<2 x i1> %m, <2 x i32> %va, <2 x i32> %vb, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.merge.v2i32(<2 x i1> %m, <2 x i32> splat (i32 2), <2 x i32> %vb, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -496,9 +474,7 @@ define <4 x i32> @vpmerge_vi_v4i32(<4 x i32> %vb, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 2, i32 0
-  %va = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.merge.v4i32(<4 x i1> %m, <4 x i32> %va, <4 x i32> %vb, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.merge.v4i32(<4 x i1> %m, <4 x i32> splat (i32 2), <4 x i32> %vb, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -533,9 +509,7 @@ define <8 x i32> @vpmerge_vi_v8i32(<8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 2, i32 0
-  %va = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %va, <8 x i32> %vb, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> splat (i32 2), <8 x i32> %vb, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -570,9 +544,7 @@ define <16 x i32> @vpmerge_vi_v16i32(<16 x i32> %vb, <16 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 2, i32 0
-  %va = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.merge.v16i32(<16 x i1> %m, <16 x i32> %va, <16 x i32> %vb, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.merge.v16i32(<16 x i1> %m, <16 x i32> splat (i32 2), <16 x i32> %vb, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -641,9 +613,7 @@ define <2 x i64> @vpmerge_vi_v2i64(<2 x i64> %vb, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 2, i32 0
-  %va = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.merge.v2i64(<2 x i1> %m, <2 x i64> %va, <2 x i64> %vb, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.merge.v2i64(<2 x i1> %m, <2 x i64> splat (i64 2), <2 x i64> %vb, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -712,9 +682,7 @@ define <4 x i64> @vpmerge_vi_v4i64(<4 x i64> %vb, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 2, i32 0
-  %va = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.merge.v4i64(<4 x i1> %m, <4 x i64> %va, <4 x i64> %vb, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.merge.v4i64(<4 x i1> %m, <4 x i64> splat (i64 2), <4 x i64> %vb, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -783,9 +751,7 @@ define <8 x i64> @vpmerge_vi_v8i64(<8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 2, i32 0
-  %va = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.merge.v8i64(<8 x i1> %m, <8 x i64> %va, <8 x i64> %vb, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.merge.v8i64(<8 x i1> %m, <8 x i64> splat (i64 2), <8 x i64> %vb, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -854,9 +820,7 @@ define <16 x i64> @vpmerge_vi_v16i64(<16 x i64> %vb, <16 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 2, i32 0
-  %va = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.merge.v16i64(<16 x i1> %m, <16 x i64> %va, <16 x i64> %vb, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.merge.v16i64(<16 x i1> %m, <16 x i64> splat (i64 2), <16 x i64> %vb, i32 %evl)
   ret <16 x i64> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index 83e3422c44b95d..cd9a38d5167d5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -127,9 +127,7 @@ define void @vpscatter_truemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs, i32 zeroext
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.v4i8.v4p0(<4 x i8> %val, <4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.v4i8.v4p0(<4 x i8> %val, <4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -266,9 +264,7 @@ define void @vpscatter_truemask_v3i16(<3 x i16> %val, <3 x ptr> %ptrs, i32 zeroe
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <3 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <3 x i1> %mhead, <3 x i1> poison, <3 x i32> zeroinitializer
-  call void @llvm.vp.scatter.v3i16.v3p0(<3 x i16> %val, <3 x ptr> %ptrs, <3 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.v3i16.v3p0(<3 x i16> %val, <3 x ptr> %ptrs, <3 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -302,9 +298,7 @@ define void @vpscatter_truemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs, i32 zeroe
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.v4i16.v4p0(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.v4i16.v4p0(<4 x i16> %val, <4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -486,9 +480,7 @@ define void @vpscatter_truemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs, i32 zeroe
 ; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -722,9 +714,7 @@ define void @vpscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs, i32 zeroe
 ; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.v4i64.v4p0(<4 x i64> %val, <4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.v4i64.v4p0(<4 x i64> %val, <4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -1026,9 +1016,7 @@ define void @vpscatter_truemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs, i32 zero
 ; RV64-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.v4f16.v4p0(<4 x half> %val, <4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.v4f16.v4p0(<4 x half> %val, <4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -1189,9 +1177,7 @@ define void @vpscatter_truemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs, i32 zer
 ; RV64-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.v4f32.v4p0(<4 x float> %val, <4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.v4f32.v4p0(<4 x float> %val, <4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -1425,9 +1411,7 @@ define void @vpscatter_truemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs, i32 ze
 ; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v10
 ; RV64-NEXT:    ret
-  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.v4f64.v4p0(<4 x double> %val, <4 x ptr> %ptrs, <4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.v4f64.v4p0(<4 x double> %val, <4 x ptr> %ptrs, <4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
index d7643bc3041832..c0aa735614b21a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
@@ -274,9 +274,7 @@ define void @vpstore_v2i8_allones_mask(<2 x i8> %val, ptr %ptr, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <2 x i1> poison, i1 true, i32 0
-  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
-  call void @llvm.vp.store.v2i8.p0(<2 x i8> %val, ptr %ptr, <2 x i1> %b, i32 %evl)
+  call void @llvm.vp.store.v2i8.p0(<2 x i8> %val, ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
   ret void
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
index 06635519898376..4bbbad5ed0e0e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
@@ -39,9 +39,7 @@ define <2 x i8> @vrem_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -65,9 +63,7 @@ define <2 x i8> @vrem_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -89,9 +85,7 @@ define <4 x i8> @vrem_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -115,9 +109,7 @@ define <4 x i8> @vrem_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -151,9 +143,7 @@ define <8 x i8> @vrem_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -177,9 +167,7 @@ define <8 x i8> @vrem_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -201,9 +189,7 @@ define <16 x i8> @vrem_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -227,9 +213,7 @@ define <16 x i8> @vrem_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -251,9 +235,7 @@ define <2 x i16> @vrem_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -277,9 +259,7 @@ define <2 x i16> @vrem_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -301,9 +281,7 @@ define <4 x i16> @vrem_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -327,9 +305,7 @@ define <4 x i16> @vrem_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -351,9 +327,7 @@ define <8 x i16> @vrem_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -377,9 +351,7 @@ define <8 x i16> @vrem_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -401,9 +373,7 @@ define <16 x i16> @vrem_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -427,9 +397,7 @@ define <16 x i16> @vrem_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -451,9 +419,7 @@ define <2 x i32> @vrem_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -477,9 +443,7 @@ define <2 x i32> @vrem_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -501,9 +465,7 @@ define <4 x i32> @vrem_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -527,9 +489,7 @@ define <4 x i32> @vrem_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -551,9 +511,7 @@ define <8 x i32> @vrem_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -577,9 +535,7 @@ define <8 x i32> @vrem_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -601,9 +557,7 @@ define <16 x i32> @vrem_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -627,9 +581,7 @@ define <16 x i32> @vrem_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -651,9 +603,7 @@ define <2 x i64> @vrem_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -705,9 +655,7 @@ define <2 x i64> @vrem_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -729,9 +677,7 @@ define <4 x i64> @vrem_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -783,9 +729,7 @@ define <4 x i64> @vrem_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -807,9 +751,7 @@ define <8 x i64> @vrem_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -861,9 +803,7 @@ define <8 x i64> @vrem_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -885,9 +825,7 @@ define <16 x i64> @vrem_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -939,8 +877,6 @@ define <16 x i64> @vrem_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
index a329e73e5e86ef..ee11307bddc88c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
@@ -38,9 +38,7 @@ define <2 x i8> @vremu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -64,9 +62,7 @@ define <2 x i8> @vremu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -88,9 +84,7 @@ define <4 x i8> @vremu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -114,9 +108,7 @@ define <4 x i8> @vremu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -150,9 +142,7 @@ define <8 x i8> @vremu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -176,9 +166,7 @@ define <8 x i8> @vremu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -200,9 +188,7 @@ define <16 x i8> @vremu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -226,9 +212,7 @@ define <16 x i8> @vremu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -250,9 +234,7 @@ define <2 x i16> @vremu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -276,9 +258,7 @@ define <2 x i16> @vremu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -300,9 +280,7 @@ define <4 x i16> @vremu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -326,9 +304,7 @@ define <4 x i16> @vremu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -350,9 +326,7 @@ define <8 x i16> @vremu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -376,9 +350,7 @@ define <8 x i16> @vremu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -400,9 +372,7 @@ define <16 x i16> @vremu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -426,9 +396,7 @@ define <16 x i16> @vremu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -450,9 +418,7 @@ define <2 x i32> @vremu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -476,9 +442,7 @@ define <2 x i32> @vremu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -500,9 +464,7 @@ define <4 x i32> @vremu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -526,9 +488,7 @@ define <4 x i32> @vremu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -550,9 +510,7 @@ define <8 x i32> @vremu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -576,9 +534,7 @@ define <8 x i32> @vremu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -600,9 +556,7 @@ define <16 x i32> @vremu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -626,9 +580,7 @@ define <16 x i32> @vremu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -650,9 +602,7 @@ define <2 x i64> @vremu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -704,9 +654,7 @@ define <2 x i64> @vremu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -728,9 +676,7 @@ define <4 x i64> @vremu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -782,9 +728,7 @@ define <4 x i64> @vremu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -806,9 +750,7 @@ define <8 x i64> @vremu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -860,9 +802,7 @@ define <8 x i64> @vremu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -884,9 +824,7 @@ define <16 x i64> @vremu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -938,8 +876,6 @@ define <16 x i64> @vremu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll
index c7c757efc1badc..367c56caf813d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vror.ll
@@ -66,7 +66,7 @@ define <1 x i8> @vror_vi_v1i8(<1 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <1 x i8> @llvm.fshr.v1i8(<1 x i8> %a, <1 x i8> %a, <1 x i8> shufflevector(<1 x i8> insertelement(<1 x i8> poison, i8 1, i32 0), <1 x i8> poison, <1 x i32> zeroinitializer))
+  %x = call <1 x i8> @llvm.fshr.v1i8(<1 x i8> %a, <1 x i8> %a, <1 x i8> splat (i8 1))
   ret <1 x i8> %x
 }
 
@@ -84,7 +84,7 @@ define <1 x i8> @vror_vi_rotl_v1i8(<1 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 7
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <1 x i8> @llvm.fshl.v1i8(<1 x i8> %a, <1 x i8> %a, <1 x i8> shufflevector(<1 x i8> insertelement(<1 x i8> poison, i8 1, i32 0), <1 x i8> poison, <1 x i32> zeroinitializer))
+  %x = call <1 x i8> @llvm.fshl.v1i8(<1 x i8> %a, <1 x i8> %a, <1 x i8> splat (i8 1))
   ret <1 x i8> %x
 }
 
@@ -150,7 +150,7 @@ define <2 x i8> @vror_vi_v2i8(<2 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %a, <2 x i8> %a, <2 x i8> shufflevector(<2 x i8> insertelement(<2 x i8> poison, i8 1, i32 0), <2 x i8> poison, <2 x i32> zeroinitializer))
+  %x = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %a, <2 x i8> %a, <2 x i8> splat (i8 1))
   ret <2 x i8> %x
 }
 
@@ -168,7 +168,7 @@ define <2 x i8> @vror_vi_rotl_v2i8(<2 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 7
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <2 x i8> @llvm.fshl.v2i8(<2 x i8> %a, <2 x i8> %a, <2 x i8> shufflevector(<2 x i8> insertelement(<2 x i8> poison, i8 1, i32 0), <2 x i8> poison, <2 x i32> zeroinitializer))
+  %x = call <2 x i8> @llvm.fshl.v2i8(<2 x i8> %a, <2 x i8> %a, <2 x i8> splat (i8 1))
   ret <2 x i8> %x
 }
 
@@ -234,7 +234,7 @@ define <4 x i8> @vror_vi_v4i8(<4 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> %a, <4 x i8> %a, <4 x i8> shufflevector(<4 x i8> insertelement(<4 x i8> poison, i8 1, i32 0), <4 x i8> poison, <4 x i32> zeroinitializer))
+  %x = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> %a, <4 x i8> %a, <4 x i8> splat (i8 1))
   ret <4 x i8> %x
 }
 
@@ -252,7 +252,7 @@ define <4 x i8> @vror_vi_rotl_v4i8(<4 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 7
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> %a, <4 x i8> %a, <4 x i8> shufflevector(<4 x i8> insertelement(<4 x i8> poison, i8 1, i32 0), <4 x i8> poison, <4 x i32> zeroinitializer))
+  %x = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> %a, <4 x i8> %a, <4 x i8> splat (i8 1))
   ret <4 x i8> %x
 }
 
@@ -318,7 +318,7 @@ define <8 x i8> @vror_vi_v8i8(<8 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <8 x i8> @llvm.fshr.v8i8(<8 x i8> %a, <8 x i8> %a, <8 x i8> shufflevector(<8 x i8> insertelement(<8 x i8> poison, i8 1, i32 0), <8 x i8> poison, <8 x i32> zeroinitializer))
+  %x = call <8 x i8> @llvm.fshr.v8i8(<8 x i8> %a, <8 x i8> %a, <8 x i8> splat (i8 1))
   ret <8 x i8> %x
 }
 
@@ -336,7 +336,7 @@ define <8 x i8> @vror_vi_rotl_v8i8(<8 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 7
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <8 x i8> @llvm.fshl.v8i8(<8 x i8> %a, <8 x i8> %a, <8 x i8> shufflevector(<8 x i8> insertelement(<8 x i8> poison, i8 1, i32 0), <8 x i8> poison, <8 x i32> zeroinitializer))
+  %x = call <8 x i8> @llvm.fshl.v8i8(<8 x i8> %a, <8 x i8> %a, <8 x i8> splat (i8 1))
   ret <8 x i8> %x
 }
 
@@ -402,7 +402,7 @@ define <16 x i8> @vror_vi_v16i8(<16 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> shufflevector(<16 x i8> insertelement(<16 x i8> poison, i8 1, i32 0), <16 x i8> poison, <16 x i32> zeroinitializer))
+  %x = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> splat (i8 1))
   ret <16 x i8> %x
 }
 
@@ -420,7 +420,7 @@ define <16 x i8> @vror_vi_rotl_v16i8(<16 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 7
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> shufflevector(<16 x i8> insertelement(<16 x i8> poison, i8 1, i32 0), <16 x i8> poison, <16 x i32> zeroinitializer))
+  %x = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> splat (i8 1))
   ret <16 x i8> %x
 }
 
@@ -492,7 +492,7 @@ define <32 x i8> @vror_vi_v32i8(<32 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %a, <32 x i8> %a, <32 x i8> shufflevector(<32 x i8> insertelement(<32 x i8> poison, i8 1, i32 0), <32 x i8> poison, <32 x i32> zeroinitializer))
+  %x = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %a, <32 x i8> %a, <32 x i8> splat (i8 1))
   ret <32 x i8> %x
 }
 
@@ -512,7 +512,7 @@ define <32 x i8> @vror_vi_rotl_v32i8(<32 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 7
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %a, <32 x i8> %a, <32 x i8> shufflevector(<32 x i8> insertelement(<32 x i8> poison, i8 1, i32 0), <32 x i8> poison, <32 x i32> zeroinitializer))
+  %x = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %a, <32 x i8> %a, <32 x i8> splat (i8 1))
   ret <32 x i8> %x
 }
 
@@ -584,7 +584,7 @@ define <64 x i8> @vror_vi_v64i8(<64 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %a, <64 x i8> %a, <64 x i8> shufflevector(<64 x i8> insertelement(<64 x i8> poison, i8 1, i32 0), <64 x i8> poison, <64 x i32> zeroinitializer))
+  %x = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %a, <64 x i8> %a, <64 x i8> splat (i8 1))
   ret <64 x i8> %x
 }
 
@@ -604,7 +604,7 @@ define <64 x i8> @vror_vi_rotl_v64i8(<64 x i8> %a) {
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 7
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %a, <64 x i8> %a, <64 x i8> shufflevector(<64 x i8> insertelement(<64 x i8> poison, i8 1, i32 0), <64 x i8> poison, <64 x i32> zeroinitializer))
+  %x = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %a, <64 x i8> %a, <64 x i8> splat (i8 1))
   ret <64 x i8> %x
 }
 
@@ -670,7 +670,7 @@ define <1 x i16> @vror_vi_v1i16(<1 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <1 x i16> @llvm.fshr.v1i16(<1 x i16> %a, <1 x i16> %a, <1 x i16> shufflevector(<1 x i16> insertelement(<1 x i16> poison, i16 1, i32 0), <1 x i16> poison, <1 x i32> zeroinitializer))
+  %x = call <1 x i16> @llvm.fshr.v1i16(<1 x i16> %a, <1 x i16> %a, <1 x i16> splat (i16 1))
   ret <1 x i16> %x
 }
 
@@ -688,7 +688,7 @@ define <1 x i16> @vror_vi_rotl_v1i16(<1 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 15
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <1 x i16> @llvm.fshl.v1i16(<1 x i16> %a, <1 x i16> %a, <1 x i16> shufflevector(<1 x i16> insertelement(<1 x i16> poison, i16 1, i32 0), <1 x i16> poison, <1 x i32> zeroinitializer))
+  %x = call <1 x i16> @llvm.fshl.v1i16(<1 x i16> %a, <1 x i16> %a, <1 x i16> splat (i16 1))
   ret <1 x i16> %x
 }
 
@@ -754,7 +754,7 @@ define <2 x i16> @vror_vi_v2i16(<2 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <2 x i16> @llvm.fshr.v2i16(<2 x i16> %a, <2 x i16> %a, <2 x i16> shufflevector(<2 x i16> insertelement(<2 x i16> poison, i16 1, i32 0), <2 x i16> poison, <2 x i32> zeroinitializer))
+  %x = call <2 x i16> @llvm.fshr.v2i16(<2 x i16> %a, <2 x i16> %a, <2 x i16> splat (i16 1))
   ret <2 x i16> %x
 }
 
@@ -772,7 +772,7 @@ define <2 x i16> @vror_vi_rotl_v2i16(<2 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 15
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> %a, <2 x i16> %a, <2 x i16> shufflevector(<2 x i16> insertelement(<2 x i16> poison, i16 1, i32 0), <2 x i16> poison, <2 x i32> zeroinitializer))
+  %x = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> %a, <2 x i16> %a, <2 x i16> splat (i16 1))
   ret <2 x i16> %x
 }
 
@@ -838,7 +838,7 @@ define <4 x i16> @vror_vi_v4i16(<4 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <4 x i16> @llvm.fshr.v4i16(<4 x i16> %a, <4 x i16> %a, <4 x i16> shufflevector(<4 x i16> insertelement(<4 x i16> poison, i16 1, i32 0), <4 x i16> poison, <4 x i32> zeroinitializer))
+  %x = call <4 x i16> @llvm.fshr.v4i16(<4 x i16> %a, <4 x i16> %a, <4 x i16> splat (i16 1))
   ret <4 x i16> %x
 }
 
@@ -856,7 +856,7 @@ define <4 x i16> @vror_vi_rotl_v4i16(<4 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 15
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <4 x i16> @llvm.fshl.v4i16(<4 x i16> %a, <4 x i16> %a, <4 x i16> shufflevector(<4 x i16> insertelement(<4 x i16> poison, i16 1, i32 0), <4 x i16> poison, <4 x i32> zeroinitializer))
+  %x = call <4 x i16> @llvm.fshl.v4i16(<4 x i16> %a, <4 x i16> %a, <4 x i16> splat (i16 1))
   ret <4 x i16> %x
 }
 
@@ -922,7 +922,7 @@ define <8 x i16> @vror_vi_v8i16(<8 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %a, <8 x i16> %a, <8 x i16> shufflevector(<8 x i16> insertelement(<8 x i16> poison, i16 1, i32 0), <8 x i16> poison, <8 x i32> zeroinitializer))
+  %x = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %a, <8 x i16> %a, <8 x i16> splat (i16 1))
   ret <8 x i16> %x
 }
 
@@ -940,7 +940,7 @@ define <8 x i16> @vror_vi_rotl_v8i16(<8 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 15
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %a, <8 x i16> %a, <8 x i16> shufflevector(<8 x i16> insertelement(<8 x i16> poison, i16 1, i32 0), <8 x i16> poison, <8 x i32> zeroinitializer))
+  %x = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %a, <8 x i16> %a, <8 x i16> splat (i16 1))
   ret <8 x i16> %x
 }
 
@@ -1006,7 +1006,7 @@ define <16 x i16> @vror_vi_v16i16(<16 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %a, <16 x i16> %a, <16 x i16> shufflevector(<16 x i16> insertelement(<16 x i16> poison, i16 1, i32 0), <16 x i16> poison, <16 x i32> zeroinitializer))
+  %x = call <16 x i16> @llvm.fshr.v16i16(<16 x i16> %a, <16 x i16> %a, <16 x i16> splat (i16 1))
   ret <16 x i16> %x
 }
 
@@ -1024,7 +1024,7 @@ define <16 x i16> @vror_vi_rotl_v16i16(<16 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 15
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %a, <16 x i16> %a, <16 x i16> shufflevector(<16 x i16> insertelement(<16 x i16> poison, i16 1, i32 0), <16 x i16> poison, <16 x i32> zeroinitializer))
+  %x = call <16 x i16> @llvm.fshl.v16i16(<16 x i16> %a, <16 x i16> %a, <16 x i16> splat (i16 1))
   ret <16 x i16> %x
 }
 
@@ -1096,7 +1096,7 @@ define <32 x i16> @vror_vi_v32i16(<32 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %a, <32 x i16> %a, <32 x i16> shufflevector(<32 x i16> insertelement(<32 x i16> poison, i16 1, i32 0), <32 x i16> poison, <32 x i32> zeroinitializer))
+  %x = call <32 x i16> @llvm.fshr.v32i16(<32 x i16> %a, <32 x i16> %a, <32 x i16> splat (i16 1))
   ret <32 x i16> %x
 }
 
@@ -1116,7 +1116,7 @@ define <32 x i16> @vror_vi_rotl_v32i16(<32 x i16> %a) {
 ; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 15
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %a, <32 x i16> %a, <32 x i16> shufflevector(<32 x i16> insertelement(<32 x i16> poison, i16 1, i32 0), <32 x i16> poison, <32 x i32> zeroinitializer))
+  %x = call <32 x i16> @llvm.fshl.v32i16(<32 x i16> %a, <32 x i16> %a, <32 x i16> splat (i16 1))
   ret <32 x i16> %x
 }
 
@@ -1184,7 +1184,7 @@ define <1 x i32> @vror_vi_v1i32(<1 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <1 x i32> @llvm.fshr.v1i32(<1 x i32> %a, <1 x i32> %a, <1 x i32> shufflevector(<1 x i32> insertelement(<1 x i32> poison, i32 1, i32 0), <1 x i32> poison, <1 x i32> zeroinitializer))
+  %x = call <1 x i32> @llvm.fshr.v1i32(<1 x i32> %a, <1 x i32> %a, <1 x i32> splat (i32 1))
   ret <1 x i32> %x
 }
 
@@ -1202,7 +1202,7 @@ define <1 x i32> @vror_vi_rotl_v1i32(<1 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 31
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <1 x i32> @llvm.fshl.v1i32(<1 x i32> %a, <1 x i32> %a, <1 x i32> shufflevector(<1 x i32> insertelement(<1 x i32> poison, i32 1, i32 0), <1 x i32> poison, <1 x i32> zeroinitializer))
+  %x = call <1 x i32> @llvm.fshl.v1i32(<1 x i32> %a, <1 x i32> %a, <1 x i32> splat (i32 1))
   ret <1 x i32> %x
 }
 
@@ -1270,7 +1270,7 @@ define <2 x i32> @vror_vi_v2i32(<2 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> %a, <2 x i32> %a, <2 x i32> shufflevector(<2 x i32> insertelement(<2 x i32> poison, i32 1, i32 0), <2 x i32> poison, <2 x i32> zeroinitializer))
+  %x = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> %a, <2 x i32> %a, <2 x i32> splat (i32 1))
   ret <2 x i32> %x
 }
 
@@ -1288,7 +1288,7 @@ define <2 x i32> @vror_vi_rotl_v2i32(<2 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 31
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %a, <2 x i32> %a, <2 x i32> shufflevector(<2 x i32> insertelement(<2 x i32> poison, i32 1, i32 0), <2 x i32> poison, <2 x i32> zeroinitializer))
+  %x = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %a, <2 x i32> %a, <2 x i32> splat (i32 1))
   ret <2 x i32> %x
 }
 
@@ -1356,7 +1356,7 @@ define <4 x i32> @vror_vi_v4i32(<4 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a, <4 x i32> %a, <4 x i32> shufflevector(<4 x i32> insertelement(<4 x i32> poison, i32 1, i32 0), <4 x i32> poison, <4 x i32> zeroinitializer))
+  %x = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a, <4 x i32> %a, <4 x i32> splat (i32 1))
   ret <4 x i32> %x
 }
 
@@ -1374,7 +1374,7 @@ define <4 x i32> @vror_vi_rotl_v4i32(<4 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 31
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %a, <4 x i32> shufflevector(<4 x i32> insertelement(<4 x i32> poison, i32 1, i32 0), <4 x i32> poison, <4 x i32> zeroinitializer))
+  %x = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %a, <4 x i32> splat (i32 1))
   ret <4 x i32> %x
 }
 
@@ -1442,7 +1442,7 @@ define <8 x i32> @vror_vi_v8i32(<8 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %a, <8 x i32> %a, <8 x i32> shufflevector(<8 x i32> insertelement(<8 x i32> poison, i32 1, i32 0), <8 x i32> poison, <8 x i32> zeroinitializer))
+  %x = call <8 x i32> @llvm.fshr.v8i32(<8 x i32> %a, <8 x i32> %a, <8 x i32> splat (i32 1))
   ret <8 x i32> %x
 }
 
@@ -1460,7 +1460,7 @@ define <8 x i32> @vror_vi_rotl_v8i32(<8 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 31
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %a, <8 x i32> %a, <8 x i32> shufflevector(<8 x i32> insertelement(<8 x i32> poison, i32 1, i32 0), <8 x i32> poison, <8 x i32> zeroinitializer))
+  %x = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %a, <8 x i32> %a, <8 x i32> splat (i32 1))
   ret <8 x i32> %x
 }
 
@@ -1528,7 +1528,7 @@ define <16 x i32> @vror_vi_v16i32(<16 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %a, <16 x i32> %a, <16 x i32> shufflevector(<16 x i32> insertelement(<16 x i32> poison, i32 1, i32 0), <16 x i32> poison, <16 x i32> zeroinitializer))
+  %x = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> %a, <16 x i32> %a, <16 x i32> splat (i32 1))
   ret <16 x i32> %x
 }
 
@@ -1546,7 +1546,7 @@ define <16 x i32> @vror_vi_rotl_v16i32(<16 x i32> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 31
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %a, <16 x i32> %a, <16 x i32> shufflevector(<16 x i32> insertelement(<16 x i32> poison, i32 1, i32 0), <16 x i32> poison, <16 x i32> zeroinitializer))
+  %x = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %a, <16 x i32> %a, <16 x i32> splat (i32 1))
   ret <16 x i32> %x
 }
 
@@ -1629,7 +1629,7 @@ define <1 x i64> @vror_vi_v1i64(<1 x i64> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <1 x i64> @llvm.fshr.v1i64(<1 x i64> %a, <1 x i64> %a, <1 x i64> shufflevector(<1 x i64> insertelement(<1 x i64> poison, i64 1, i32 0), <1 x i64> poison, <1 x i32> zeroinitializer))
+  %x = call <1 x i64> @llvm.fshr.v1i64(<1 x i64> %a, <1 x i64> %a, <1 x i64> splat (i64 1))
   ret <1 x i64> %x
 }
 
@@ -1662,7 +1662,7 @@ define <1 x i64> @vror_vi_rotl_v1i64(<1 x i64> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 63
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <1 x i64> @llvm.fshl.v1i64(<1 x i64> %a, <1 x i64> %a, <1 x i64> shufflevector(<1 x i64> insertelement(<1 x i64> poison, i64 1, i32 0), <1 x i64> poison, <1 x i32> zeroinitializer))
+  %x = call <1 x i64> @llvm.fshl.v1i64(<1 x i64> %a, <1 x i64> %a, <1 x i64> splat (i64 1))
   ret <1 x i64> %x
 }
 
@@ -1745,7 +1745,7 @@ define <2 x i64> @vror_vi_v2i64(<2 x i64> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> shufflevector(<2 x i64> insertelement(<2 x i64> poison, i64 1, i32 0), <2 x i64> poison, <2 x i32> zeroinitializer))
+  %x = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> splat (i64 1))
   ret <2 x i64> %x
 }
 
@@ -1778,7 +1778,7 @@ define <2 x i64> @vror_vi_rotl_v2i64(<2 x i64> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 63
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> shufflevector(<2 x i64> insertelement(<2 x i64> poison, i64 1, i32 0), <2 x i64> poison, <2 x i32> zeroinitializer))
+  %x = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> splat (i64 1))
   ret <2 x i64> %x
 }
 
@@ -1861,7 +1861,7 @@ define <4 x i64> @vror_vi_v4i64(<4 x i64> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %a, <4 x i64> %a, <4 x i64> shufflevector(<4 x i64> insertelement(<4 x i64> poison, i64 1, i32 0), <4 x i64> poison, <4 x i32> zeroinitializer))
+  %x = call <4 x i64> @llvm.fshr.v4i64(<4 x i64> %a, <4 x i64> %a, <4 x i64> splat (i64 1))
   ret <4 x i64> %x
 }
 
@@ -1894,7 +1894,7 @@ define <4 x i64> @vror_vi_rotl_v4i64(<4 x i64> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 63
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %a, <4 x i64> %a, <4 x i64> shufflevector(<4 x i64> insertelement(<4 x i64> poison, i64 1, i32 0), <4 x i64> poison, <4 x i32> zeroinitializer))
+  %x = call <4 x i64> @llvm.fshl.v4i64(<4 x i64> %a, <4 x i64> %a, <4 x i64> splat (i64 1))
   ret <4 x i64> %x
 }
 
@@ -1977,7 +1977,7 @@ define <8 x i64> @vror_vi_v8i64(<8 x i64> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 1
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %a, <8 x i64> %a, <8 x i64> shufflevector(<8 x i64> insertelement(<8 x i64> poison, i64 1, i32 0), <8 x i64> poison, <8 x i32> zeroinitializer))
+  %x = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> %a, <8 x i64> %a, <8 x i64> splat (i64 1))
   ret <8 x i64> %x
 }
 
@@ -2010,6 +2010,6 @@ define <8 x i64> @vror_vi_rotl_v8i64(<8 x i64> %a) {
 ; CHECK-ZVKB-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vror.vi v8, v8, 63
 ; CHECK-ZVKB-NEXT:    ret
-  %x = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %a, <8 x i64> %a, <8 x i64> shufflevector(<8 x i64> insertelement(<8 x i64> poison, i64 1, i32 0), <8 x i64> poison, <8 x i32> zeroinitializer))
+  %x = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> %a, <8 x i64> %a, <8 x i64> splat (i64 1))
   ret <8 x i64> %x
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll
index fe433b80a87fb4..563482b88e8bdb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll
@@ -26,9 +26,7 @@ define <2 x i8> @vrsub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -38,9 +36,7 @@ define <2 x i8> @vrsub_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> splat (i8 2), <2 x i8> %va, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -50,11 +46,7 @@ define <2 x i8> @vrsub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> splat (i8 2), <2 x i8> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -80,9 +72,7 @@ define <4 x i8> @vrsub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %vb, <4 x i8> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %vb, <4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -92,9 +82,7 @@ define <4 x i8> @vrsub_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %vb, <4 x i8> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> splat (i8 2), <4 x i8> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -104,11 +92,7 @@ define <4 x i8> @vrsub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %vb, <4 x i8> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> splat (i8 2), <4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -134,9 +118,7 @@ define <8 x i8> @vrsub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %vb, <8 x i8> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %vb, <8 x i8> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -146,9 +128,7 @@ define <8 x i8> @vrsub_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %vb, <8 x i8> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> splat (i8 2), <8 x i8> %va, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -158,11 +138,7 @@ define <8 x i8> @vrsub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %vb, <8 x i8> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> splat (i8 2), <8 x i8> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -188,9 +164,7 @@ define <16 x i8> @vrsub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %vb, <16 x i8> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %vb, <16 x i8> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -200,9 +174,7 @@ define <16 x i8> @vrsub_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %vb, <16 x i8> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> splat (i8 2), <16 x i8> %va, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -212,11 +184,7 @@ define <16 x i8> @vrsub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %vb, <16 x i8> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> splat (i8 2), <16 x i8> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -242,9 +210,7 @@ define <2 x i16> @vrsub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %vb, <2 x i16> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %vb, <2 x i16> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -254,9 +220,7 @@ define <2 x i16> @vrsub_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %vb, <2 x i16> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> splat (i16 2), <2 x i16> %va, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -266,11 +230,7 @@ define <2 x i16> @vrsub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %vb, <2 x i16> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> splat (i16 2), <2 x i16> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -296,9 +256,7 @@ define <4 x i16> @vrsub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %vb, <4 x i16> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %vb, <4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -308,9 +266,7 @@ define <4 x i16> @vrsub_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %vb, <4 x i16> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> splat (i16 2), <4 x i16> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -320,11 +276,7 @@ define <4 x i16> @vrsub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %vb, <4 x i16> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> splat (i16 2), <4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -350,9 +302,7 @@ define <8 x i16> @vrsub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %vb, <8 x i16> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %vb, <8 x i16> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -362,9 +312,7 @@ define <8 x i16> @vrsub_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %vb, <8 x i16> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> splat (i16 2), <8 x i16> %va, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -374,11 +322,7 @@ define <8 x i16> @vrsub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %vb, <8 x i16> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> splat (i16 2), <8 x i16> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -404,9 +348,7 @@ define <16 x i16> @vrsub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %vb, <16 x i16> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %vb, <16 x i16> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -416,9 +358,7 @@ define <16 x i16> @vrsub_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %vb, <16 x i16> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> splat (i16 2), <16 x i16> %va, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -428,11 +368,7 @@ define <16 x i16> @vrsub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %vb, <16 x i16> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> splat (i16 2), <16 x i16> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -458,9 +394,7 @@ define <2 x i32> @vrsub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %vb, <2 x i32> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %vb, <2 x i32> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -470,9 +404,7 @@ define <2 x i32> @vrsub_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %vb, <2 x i32> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> splat (i32 2), <2 x i32> %va, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -482,11 +414,7 @@ define <2 x i32> @vrsub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %vb, <2 x i32> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> splat (i32 2), <2 x i32> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -512,9 +440,7 @@ define <4 x i32> @vrsub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %vb, <4 x i32> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %vb, <4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -524,9 +450,7 @@ define <4 x i32> @vrsub_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %vb, <4 x i32> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> splat (i32 2), <4 x i32> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -536,11 +460,7 @@ define <4 x i32> @vrsub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %vb, <4 x i32> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> splat (i32 2), <4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -566,9 +486,7 @@ define <8 x i32> @vrsub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %vb, <8 x i32> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %vb, <8 x i32> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -578,9 +496,7 @@ define <8 x i32> @vrsub_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %vb, <8 x i32> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> splat (i32 2), <8 x i32> %va, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -590,11 +506,7 @@ define <8 x i32> @vrsub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %vb, <8 x i32> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> splat (i32 2), <8 x i32> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -620,9 +532,7 @@ define <16 x i32> @vrsub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %vb, <16 x i32> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %vb, <16 x i32> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -632,9 +542,7 @@ define <16 x i32> @vrsub_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %vb, <16 x i32> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> splat (i32 2), <16 x i32> %va, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -644,11 +552,7 @@ define <16 x i32> @vrsub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %vb, <16 x i32> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> splat (i32 2), <16 x i32> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -702,9 +606,7 @@ define <2 x i64> @vrsub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %vb, <2 x i64> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %vb, <2 x i64> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -714,9 +616,7 @@ define <2 x i64> @vrsub_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %vb, <2 x i64> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> splat (i64 2), <2 x i64> %va, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -726,11 +626,7 @@ define <2 x i64> @vrsub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %vb, <2 x i64> %va, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> splat (i64 2), <2 x i64> %va, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -784,9 +680,7 @@ define <4 x i64> @vrsub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %vb, <4 x i64> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %vb, <4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -796,9 +690,7 @@ define <4 x i64> @vrsub_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %vb, <4 x i64> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> splat (i64 2), <4 x i64> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -808,11 +700,7 @@ define <4 x i64> @vrsub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %vb, <4 x i64> %va, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> splat (i64 2), <4 x i64> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -866,9 +754,7 @@ define <8 x i64> @vrsub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %vb, <8 x i64> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %vb, <8 x i64> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -878,9 +764,7 @@ define <8 x i64> @vrsub_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %vb, <8 x i64> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> splat (i64 2), <8 x i64> %va, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -890,11 +774,7 @@ define <8 x i64> @vrsub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %vb, <8 x i64> %va, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> splat (i64 2), <8 x i64> %va, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -948,9 +828,7 @@ define <16 x i64> @vrsub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %vb, <16 x i64> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %vb, <16 x i64> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -960,9 +838,7 @@ define <16 x i64> @vrsub_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %vb, <16 x i64> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> splat (i64 2), <16 x i64> %va, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -972,10 +848,6 @@ define <16 x i64> @vrsub_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %vb, <16 x i64> %va, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> splat (i64 2), <16 x i64> %va, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index d7ed20f4e09868..291629de6dcfab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -43,9 +43,7 @@ define <2 x i8> @vsadd_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -69,9 +67,7 @@ define <2 x i8> @vsadd_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -81,9 +77,7 @@ define <2 x i8> @vsadd_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -93,11 +87,7 @@ define <2 x i8> @vsadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -119,9 +109,7 @@ define <4 x i8> @vsadd_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -157,9 +145,7 @@ define <4 x i8> @vsadd_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -169,9 +155,7 @@ define <4 x i8> @vsadd_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -181,11 +165,7 @@ define <4 x i8> @vsadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -207,9 +187,7 @@ define <5 x i8> @vsadd_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -233,9 +211,7 @@ define <5 x i8> @vsadd_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -245,9 +221,7 @@ define <5 x i8> @vsadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -257,11 +231,7 @@ define <5 x i8> @vsadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -283,9 +253,7 @@ define <8 x i8> @vsadd_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -309,9 +277,7 @@ define <8 x i8> @vsadd_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -321,9 +287,7 @@ define <8 x i8> @vsadd_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -333,11 +297,7 @@ define <8 x i8> @vsadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -359,9 +319,7 @@ define <16 x i8> @vsadd_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -385,9 +343,7 @@ define <16 x i8> @vsadd_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -397,9 +353,7 @@ define <16 x i8> @vsadd_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -409,11 +363,7 @@ define <16 x i8> @vsadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -440,9 +390,7 @@ define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -464,11 +412,7 @@ define <256 x i8> @vsadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v16, v16, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -485,9 +429,7 @@ define <256 x i8> @vsadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -504,9 +446,7 @@ define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -528,9 +468,7 @@ define <2 x i16> @vsadd_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -554,9 +492,7 @@ define <2 x i16> @vsadd_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -566,9 +502,7 @@ define <2 x i16> @vsadd_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -578,11 +512,7 @@ define <2 x i16> @vsadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -604,9 +534,7 @@ define <4 x i16> @vsadd_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -630,9 +558,7 @@ define <4 x i16> @vsadd_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -642,9 +568,7 @@ define <4 x i16> @vsadd_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -654,11 +578,7 @@ define <4 x i16> @vsadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -680,9 +600,7 @@ define <8 x i16> @vsadd_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -706,9 +624,7 @@ define <8 x i16> @vsadd_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -718,9 +634,7 @@ define <8 x i16> @vsadd_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -730,11 +644,7 @@ define <8 x i16> @vsadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -756,9 +666,7 @@ define <16 x i16> @vsadd_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -782,9 +690,7 @@ define <16 x i16> @vsadd_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -794,9 +700,7 @@ define <16 x i16> @vsadd_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -806,11 +710,7 @@ define <16 x i16> @vsadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -832,9 +732,7 @@ define <2 x i32> @vsadd_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -858,9 +756,7 @@ define <2 x i32> @vsadd_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -870,9 +766,7 @@ define <2 x i32> @vsadd_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -882,11 +776,7 @@ define <2 x i32> @vsadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -908,9 +798,7 @@ define <4 x i32> @vsadd_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -934,9 +822,7 @@ define <4 x i32> @vsadd_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -946,9 +832,7 @@ define <4 x i32> @vsadd_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -958,11 +842,7 @@ define <4 x i32> @vsadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -984,9 +864,7 @@ define <8 x i32> @vsadd_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1010,9 +888,7 @@ define <8 x i32> @vsadd_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1022,9 +898,7 @@ define <8 x i32> @vsadd_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1034,11 +908,7 @@ define <8 x i32> @vsadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1060,9 +930,7 @@ define <16 x i32> @vsadd_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1086,9 +954,7 @@ define <16 x i32> @vsadd_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1098,9 +964,7 @@ define <16 x i32> @vsadd_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1110,11 +974,7 @@ define <16 x i32> @vsadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1136,9 +996,7 @@ define <2 x i64> @vsadd_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1190,9 +1048,7 @@ define <2 x i64> @vsadd_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1202,9 +1058,7 @@ define <2 x i64> @vsadd_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1214,11 +1068,7 @@ define <2 x i64> @vsadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1240,9 +1090,7 @@ define <4 x i64> @vsadd_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1294,9 +1142,7 @@ define <4 x i64> @vsadd_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1306,9 +1152,7 @@ define <4 x i64> @vsadd_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1318,11 +1162,7 @@ define <4 x i64> @vsadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1344,9 +1184,7 @@ define <8 x i64> @vsadd_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1398,9 +1236,7 @@ define <8 x i64> @vsadd_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1410,9 +1246,7 @@ define <8 x i64> @vsadd_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1422,11 +1256,7 @@ define <8 x i64> @vsadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1448,9 +1278,7 @@ define <16 x i64> @vsadd_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1502,9 +1330,7 @@ define <16 x i64> @vsadd_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1514,9 +1340,7 @@ define <16 x i64> @vsadd_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1526,11 +1350,7 @@ define <16 x i64> @vsadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1542,16 +1362,16 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-LABEL: vsadd_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1583,24 +1403,22 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vsadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vsadd_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1628,11 +1446,7 @@ define <32 x i64> @vsadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vsadd.vi v16, v16, -1
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1663,9 +1477,7 @@ define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1694,8 +1506,6 @@ define <32 x i64> @vsadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
index 8e655a7faf3e81..741699289e0271 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
@@ -34,9 +34,7 @@ define <2 x i8> @sadd_v2i8_vi(<2 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
+  %v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5))
   ret <2 x i8> %v
 }
 
@@ -70,9 +68,7 @@ define <4 x i8> @sadd_v4i8_vi(<4 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
+  %v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5))
   ret <4 x i8> %v
 }
 
@@ -106,9 +102,7 @@ define <8 x i8> @sadd_v8i8_vi(<8 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
+  %v = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5))
   ret <8 x i8> %v
 }
 
@@ -142,9 +136,7 @@ define <16 x i8> @sadd_v16i8_vi(<16 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
+  %v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5))
   ret <16 x i8> %v
 }
 
@@ -178,9 +170,7 @@ define <2 x i16> @sadd_v2i16_vi(<2 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
+  %v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5))
   ret <2 x i16> %v
 }
 
@@ -214,9 +204,7 @@ define <4 x i16> @sadd_v4i16_vi(<4 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
+  %v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5))
   ret <4 x i16> %v
 }
 
@@ -250,9 +238,7 @@ define <8 x i16> @sadd_v8i16_vi(<8 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
+  %v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5))
   ret <8 x i16> %v
 }
 
@@ -286,9 +272,7 @@ define <16 x i16> @sadd_v16i16_vi(<16 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
+  %v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5))
   ret <16 x i16> %v
 }
 
@@ -334,9 +318,7 @@ define <2 x i32> @sadd_v2i32_vi(<2 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
+  %v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5))
   ret <2 x i32> %v
 }
 
@@ -370,9 +352,7 @@ define <4 x i32> @sadd_v4i32_vi(<4 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
+  %v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5))
   ret <4 x i32> %v
 }
 
@@ -406,9 +386,7 @@ define <8 x i32> @sadd_v8i32_vi(<8 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
+  %v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5))
   ret <8 x i32> %v
 }
 
@@ -442,9 +420,7 @@ define <16 x i32> @sadd_v16i32_vi(<16 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
+  %v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5))
   ret <16 x i32> %v
 }
 
@@ -491,9 +467,7 @@ define <2 x i64> @sadd_v2i64_vi(<2 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
+  %v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5))
   ret <2 x i64> %v
 }
 
@@ -540,9 +514,7 @@ define <4 x i64> @sadd_v4i64_vi(<4 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
+  %v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5))
   ret <4 x i64> %v
 }
 
@@ -589,9 +561,7 @@ define <8 x i64> @sadd_v8i64_vi(<8 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
+  %v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5))
   ret <8 x i64> %v
 }
 
@@ -638,8 +608,6 @@ define <16 x i64> @sadd_v16i64_vi(<16 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
+  %v = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5))
   ret <16 x i64> %v
 }
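
For reference, the pattern being converted throughout these tests is the explicit insertelement/shufflevector splat idiom being replaced by the equivalent splat constant shorthand. A minimal sketch of the two forms, taken from the v4i32 sadd case above (illustrative only, not part of the patch):

  ; before: splat built from insertelement + shufflevector
  %elt.head = insertelement <4 x i32> poison, i32 5, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)

  ; after: the same splat constant written with the shorthand
  %v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5))
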
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index ea248010ef09a1..d38ee1148e894e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -39,9 +39,7 @@ define <2 x i8> @vsaddu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -65,9 +63,7 @@ define <2 x i8> @vsaddu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -77,9 +73,7 @@ define <2 x i8> @vsaddu_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -89,11 +83,7 @@ define <2 x i8> @vsaddu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -115,9 +105,7 @@ define <4 x i8> @vsaddu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -153,9 +141,7 @@ define <4 x i8> @vsaddu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -165,9 +151,7 @@ define <4 x i8> @vsaddu_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -177,11 +161,7 @@ define <4 x i8> @vsaddu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -203,9 +183,7 @@ define <5 x i8> @vsaddu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -229,9 +207,7 @@ define <5 x i8> @vsaddu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -241,9 +217,7 @@ define <5 x i8> @vsaddu_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -253,11 +227,7 @@ define <5 x i8> @vsaddu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -279,9 +249,7 @@ define <8 x i8> @vsaddu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -305,9 +273,7 @@ define <8 x i8> @vsaddu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -317,9 +283,7 @@ define <8 x i8> @vsaddu_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -329,11 +293,7 @@ define <8 x i8> @vsaddu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -355,9 +315,7 @@ define <16 x i8> @vsaddu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -381,9 +339,7 @@ define <16 x i8> @vsaddu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -393,9 +349,7 @@ define <16 x i8> @vsaddu_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -405,11 +359,7 @@ define <16 x i8> @vsaddu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -436,9 +386,7 @@ define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -460,11 +408,7 @@ define <256 x i8> @vsaddu_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v16, v16, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -481,9 +425,7 @@ define <256 x i8> @vsaddu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -500,9 +442,7 @@ define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -524,9 +464,7 @@ define <2 x i16> @vsaddu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -550,9 +488,7 @@ define <2 x i16> @vsaddu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -562,9 +498,7 @@ define <2 x i16> @vsaddu_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -574,11 +508,7 @@ define <2 x i16> @vsaddu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -600,9 +530,7 @@ define <4 x i16> @vsaddu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -626,9 +554,7 @@ define <4 x i16> @vsaddu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -638,9 +564,7 @@ define <4 x i16> @vsaddu_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -650,11 +574,7 @@ define <4 x i16> @vsaddu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -676,9 +596,7 @@ define <8 x i16> @vsaddu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -702,9 +620,7 @@ define <8 x i16> @vsaddu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -714,9 +630,7 @@ define <8 x i16> @vsaddu_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -726,11 +640,7 @@ define <8 x i16> @vsaddu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -752,9 +662,7 @@ define <16 x i16> @vsaddu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -778,9 +686,7 @@ define <16 x i16> @vsaddu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -790,9 +696,7 @@ define <16 x i16> @vsaddu_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -802,11 +706,7 @@ define <16 x i16> @vsaddu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -828,9 +728,7 @@ define <2 x i32> @vsaddu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -854,9 +752,7 @@ define <2 x i32> @vsaddu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -866,9 +762,7 @@ define <2 x i32> @vsaddu_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -878,11 +772,7 @@ define <2 x i32> @vsaddu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -904,9 +794,7 @@ define <4 x i32> @vsaddu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -930,9 +818,7 @@ define <4 x i32> @vsaddu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -942,9 +828,7 @@ define <4 x i32> @vsaddu_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -954,11 +838,7 @@ define <4 x i32> @vsaddu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -980,9 +860,7 @@ define <8 x i32> @vsaddu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1006,9 +884,7 @@ define <8 x i32> @vsaddu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1018,9 +894,7 @@ define <8 x i32> @vsaddu_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1030,11 +904,7 @@ define <8 x i32> @vsaddu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1056,9 +926,7 @@ define <16 x i32> @vsaddu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1082,9 +950,7 @@ define <16 x i32> @vsaddu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1094,9 +960,7 @@ define <16 x i32> @vsaddu_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1106,11 +970,7 @@ define <16 x i32> @vsaddu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1132,9 +992,7 @@ define <2 x i64> @vsaddu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1186,9 +1044,7 @@ define <2 x i64> @vsaddu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1198,9 +1054,7 @@ define <2 x i64> @vsaddu_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1210,11 +1064,7 @@ define <2 x i64> @vsaddu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1236,9 +1086,7 @@ define <4 x i64> @vsaddu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1290,9 +1138,7 @@ define <4 x i64> @vsaddu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1302,9 +1148,7 @@ define <4 x i64> @vsaddu_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1314,11 +1158,7 @@ define <4 x i64> @vsaddu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1340,9 +1180,7 @@ define <8 x i64> @vsaddu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1394,9 +1232,7 @@ define <8 x i64> @vsaddu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1406,9 +1242,7 @@ define <8 x i64> @vsaddu_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1418,11 +1252,7 @@ define <8 x i64> @vsaddu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1444,9 +1274,7 @@ define <16 x i64> @vsaddu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1498,9 +1326,7 @@ define <16 x i64> @vsaddu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1510,9 +1336,7 @@ define <16 x i64> @vsaddu_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1522,11 +1346,7 @@ define <16 x i64> @vsaddu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1538,16 +1358,16 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
 ; RV32-LABEL: vsaddu_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1579,24 +1399,22 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vsaddu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vsaddu_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1624,11 +1442,7 @@ define <32 x i64> @vsaddu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vsaddu.vi v16, v16, -1
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1659,9 +1473,7 @@ define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1690,8 +1502,6 @@ define <32 x i64> @vsaddu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
index 5764f51c0ff10b..7b2cab294aa496 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
@@ -34,9 +34,7 @@ define <2 x i8> @uadd_v2i8_vi(<2 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
+  %v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 8))
   ret <2 x i8> %v
 }
 
@@ -70,9 +68,7 @@ define <4 x i8> @uadd_v4i8_vi(<4 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
+  %v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 8))
   ret <4 x i8> %v
 }
 
@@ -106,9 +102,7 @@ define <8 x i8> @uadd_v8i8_vi(<8 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
+  %v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 8))
   ret <8 x i8> %v
 }
 
@@ -142,9 +136,7 @@ define <16 x i8> @uadd_v16i8_vi(<16 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
+  %v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 8))
   ret <16 x i8> %v
 }
 
@@ -178,9 +170,7 @@ define <2 x i16> @uadd_v2i16_vi(<2 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
+  %v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 8))
   ret <2 x i16> %v
 }
 
@@ -214,9 +204,7 @@ define <4 x i16> @uadd_v4i16_vi(<4 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
+  %v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 8))
   ret <4 x i16> %v
 }
 
@@ -250,9 +238,7 @@ define <8 x i16> @uadd_v8i16_vi(<8 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
+  %v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 8))
   ret <8 x i16> %v
 }
 
@@ -286,9 +272,7 @@ define <16 x i16> @uadd_v16i16_vi(<16 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
+  %v = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 8))
   ret <16 x i16> %v
 }
 
@@ -334,9 +318,7 @@ define <2 x i32> @uadd_v2i32_vi(<2 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
+  %v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 8))
   ret <2 x i32> %v
 }
 
@@ -370,9 +352,7 @@ define <4 x i32> @uadd_v4i32_vi(<4 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
+  %v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 8))
   ret <4 x i32> %v
 }
 
@@ -406,9 +386,7 @@ define <8 x i32> @uadd_v8i32_vi(<8 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
+  %v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 8))
   ret <8 x i32> %v
 }
 
@@ -442,9 +420,7 @@ define <16 x i32> @uadd_v16i32_vi(<16 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
+  %v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 8))
   ret <16 x i32> %v
 }
 
@@ -491,9 +467,7 @@ define <2 x i64> @uadd_v2i64_vi(<2 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 8, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
+  %v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 8))
   ret <2 x i64> %v
 }
 
@@ -540,9 +514,7 @@ define <4 x i64> @uadd_v4i64_vi(<4 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 8, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
+  %v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 8))
   ret <4 x i64> %v
 }
 
@@ -589,9 +561,7 @@ define <8 x i64> @uadd_v8i64_vi(<8 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 8, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
+  %v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 8))
   ret <8 x i64> %v
 }
 
@@ -638,8 +608,6 @@ define <16 x i64> @uadd_v16i64_vi(<16 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 8, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
+  %v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 8))
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
index ead41b0717ff1c..7dcd4c41998279 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -204,10 +204,8 @@ define void @vselect_vi_v6i32(ptr %b, ptr %cc, ptr %z) {
 ; RV64-NEXT:    vse32.v v8, (a2)
 ; RV64-NEXT:    ret
   %vb = load <6 x i32>, ptr %b
-  %a = insertelement <6 x i32> poison, i32 -1, i32 0
-  %va = shufflevector <6 x i32> %a, <6 x i32> poison, <6 x i32> zeroinitializer
   %vcc = load <6 x i1>, ptr %cc
-  %vsel = select <6 x i1> %vcc, <6 x i32> %va, <6 x i32> %vb
+  %vsel = select <6 x i1> %vcc, <6 x i32> splat (i32 -1), <6 x i32> %vb
   store <6 x i32> %vsel, ptr %z
   ret void
 }
@@ -415,10 +413,8 @@ define void @vselect_vfpzero_v6f32(ptr %b, ptr %cc, ptr %z) {
 ; RV64-NEXT:    vse32.v v8, (a2)
 ; RV64-NEXT:    ret
   %vb = load <6 x float>, ptr %b
-  %a = insertelement <6 x float> poison, float 0.0, i32 0
-  %va = shufflevector <6 x float> %a, <6 x float> poison, <6 x i32> zeroinitializer
   %vcc = load <6 x i1>, ptr %cc
-  %vsel = select <6 x i1> %vcc, <6 x float> %va, <6 x float> %vb
+  %vsel = select <6 x i1> %vcc, <6 x float> splat (float 0.0), <6 x float> %vb
   store <6 x float> %vsel, ptr %z
   ret void
 }
@@ -468,10 +464,8 @@ define void @vselect_vi_v8i32(ptr %b, ptr %cc, ptr %z) {
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <8 x i32>, ptr %b
-  %a = insertelement <8 x i32> poison, i32 -1, i32 0
-  %va = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
   %vcc = load <8 x i1>, ptr %cc
-  %vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
+  %vsel = select <8 x i1> %vcc, <8 x i32> splat (i32 -1), <8 x i32> %vb
   store <8 x i32> %vsel, ptr %z
   ret void
 }
@@ -521,10 +515,8 @@ define void @vselect_vfpzero_v8f32(ptr %b, ptr %cc, ptr %z) {
 ; CHECK-NEXT:    vse32.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <8 x float>, ptr %b
-  %a = insertelement <8 x float> poison, float 0.0, i32 0
-  %va = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
   %vcc = load <8 x i1>, ptr %cc
-  %vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
+  %vsel = select <8 x i1> %vcc, <8 x float> splat (float 0.0), <8 x float> %vb
   store <8 x float> %vsel, ptr %z
   ret void
 }
@@ -574,10 +566,8 @@ define void @vselect_vi_v16i16(ptr %b, ptr %cc, ptr %z) {
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <16 x i16>, ptr %b
-  %a = insertelement <16 x i16> poison, i16 4, i32 0
-  %va = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
   %vcc = load <16 x i1>, ptr %cc
-  %vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
+  %vsel = select <16 x i1> %vcc, <16 x i16> splat (i16 4), <16 x i16> %vb
   store <16 x i16> %vsel, ptr %z
   ret void
 }
@@ -630,10 +620,8 @@ define void @vselect_vfpzero_v32f16(ptr %b, ptr %cc, ptr %z) {
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    ret
   %vb = load <32 x half>, ptr %b
-  %a = insertelement <32 x half> poison, half 0.0, i32 0
-  %va = shufflevector <32 x half> %a, <32 x half> poison, <32 x i32> zeroinitializer
   %vcc = load <32 x i1>, ptr %cc
-  %vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
+  %vsel = select <32 x i1> %vcc, <32 x half> splat (half 0.0), <32 x half> %vb
   store <32 x half> %vsel, ptr %z
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
index bbbd6d69e37c3a..c4b7c1f2f19f0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
@@ -37,9 +37,7 @@ define <2 x i8> @vsll_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -63,9 +61,7 @@ define <2 x i8> @vsll_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -75,9 +71,7 @@ define <2 x i8> @vsll_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> splat (i8 3), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -87,11 +81,7 @@ define <2 x i8> @vsll_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> splat (i8 3), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -125,9 +115,7 @@ define <4 x i8> @vsll_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -151,9 +139,7 @@ define <4 x i8> @vsll_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -163,9 +149,7 @@ define <4 x i8> @vsll_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> splat (i8 3), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -175,11 +159,7 @@ define <4 x i8> @vsll_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> splat (i8 3), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -201,9 +181,7 @@ define <8 x i8> @vsll_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -227,9 +205,7 @@ define <8 x i8> @vsll_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -239,9 +215,7 @@ define <8 x i8> @vsll_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> splat (i8 3), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -251,11 +225,7 @@ define <8 x i8> @vsll_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> splat (i8 3), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -277,9 +247,7 @@ define <16 x i8> @vsll_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -303,9 +271,7 @@ define <16 x i8> @vsll_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -315,9 +281,7 @@ define <16 x i8> @vsll_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> splat (i8 3), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -327,11 +291,7 @@ define <16 x i8> @vsll_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> splat (i8 3), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -353,9 +313,7 @@ define <2 x i16> @vsll_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -379,9 +337,7 @@ define <2 x i16> @vsll_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -391,9 +347,7 @@ define <2 x i16> @vsll_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> splat (i16 3), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -403,11 +357,7 @@ define <2 x i16> @vsll_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> splat (i16 3), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -429,9 +379,7 @@ define <4 x i16> @vsll_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -455,9 +403,7 @@ define <4 x i16> @vsll_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -467,9 +413,7 @@ define <4 x i16> @vsll_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> splat (i16 3), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -479,11 +423,7 @@ define <4 x i16> @vsll_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> splat (i16 3), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -505,9 +445,7 @@ define <8 x i16> @vsll_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -531,9 +469,7 @@ define <8 x i16> @vsll_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -543,9 +479,7 @@ define <8 x i16> @vsll_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> splat (i16 3), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -555,11 +489,7 @@ define <8 x i16> @vsll_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> splat (i16 3), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -581,9 +511,7 @@ define <16 x i16> @vsll_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -607,9 +535,7 @@ define <16 x i16> @vsll_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -619,9 +545,7 @@ define <16 x i16> @vsll_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> splat (i16 3), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -631,11 +555,7 @@ define <16 x i16> @vsll_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> splat (i16 3), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -657,9 +577,7 @@ define <2 x i32> @vsll_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -683,9 +601,7 @@ define <2 x i32> @vsll_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -695,9 +611,7 @@ define <2 x i32> @vsll_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> splat (i32 3), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -707,11 +621,7 @@ define <2 x i32> @vsll_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> splat (i32 3), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -733,9 +643,7 @@ define <4 x i32> @vsll_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -759,9 +667,7 @@ define <4 x i32> @vsll_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -771,9 +677,7 @@ define <4 x i32> @vsll_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> splat (i32 3), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -783,11 +687,7 @@ define <4 x i32> @vsll_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> splat (i32 3), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -809,9 +709,7 @@ define <8 x i32> @vsll_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -835,9 +733,7 @@ define <8 x i32> @vsll_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -847,9 +743,7 @@ define <8 x i32> @vsll_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> splat (i32 3), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -859,11 +753,7 @@ define <8 x i32> @vsll_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> splat (i32 3), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -885,9 +775,7 @@ define <16 x i32> @vsll_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -911,9 +799,7 @@ define <16 x i32> @vsll_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -923,9 +809,7 @@ define <16 x i32> @vsll_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> splat (i32 3), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -935,11 +819,7 @@ define <16 x i32> @vsll_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> splat (i32 3), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -961,9 +841,7 @@ define <2 x i64> @vsll_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -999,9 +877,7 @@ define <2 x i64> @vsll_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1011,9 +887,7 @@ define <2 x i64> @vsll_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> splat (i64 3), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1023,11 +897,7 @@ define <2 x i64> @vsll_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> splat (i64 3), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1049,9 +919,7 @@ define <4 x i64> @vsll_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1087,9 +955,7 @@ define <4 x i64> @vsll_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1099,9 +965,7 @@ define <4 x i64> @vsll_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> splat (i64 3), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1111,11 +975,7 @@ define <4 x i64> @vsll_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> splat (i64 3), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1137,9 +997,7 @@ define <8 x i64> @vsll_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1175,9 +1033,7 @@ define <8 x i64> @vsll_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1187,9 +1043,7 @@ define <8 x i64> @vsll_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> splat (i64 3), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1199,11 +1053,7 @@ define <8 x i64> @vsll_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> splat (i64 3), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1225,9 +1075,7 @@ define <16 x i64> @vsll_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1263,9 +1111,7 @@ define <16 x i64> @vsll_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1275,9 +1121,7 @@ define <16 x i64> @vsll_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> splat (i64 3), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1287,10 +1131,6 @@ define <16 x i64> @vsll_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> splat (i64 3), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
index fc84e6ce3b6c7c..7ea5b1f0b505a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
@@ -39,9 +39,7 @@ define <2 x i8> @vsra_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -65,9 +63,7 @@ define <2 x i8> @vsra_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -77,9 +73,7 @@ define <2 x i8> @vsra_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -89,11 +83,7 @@ define <2 x i8> @vsra_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -115,9 +105,7 @@ define <4 x i8> @vsra_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -141,9 +129,7 @@ define <4 x i8> @vsra_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -153,9 +139,7 @@ define <4 x i8> @vsra_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -165,11 +149,7 @@ define <4 x i8> @vsra_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -203,9 +183,7 @@ define <8 x i8> @vsra_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -229,9 +207,7 @@ define <8 x i8> @vsra_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -241,9 +217,7 @@ define <8 x i8> @vsra_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -253,11 +227,7 @@ define <8 x i8> @vsra_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -279,9 +249,7 @@ define <16 x i8> @vsra_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -305,9 +273,7 @@ define <16 x i8> @vsra_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -317,9 +283,7 @@ define <16 x i8> @vsra_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -329,11 +293,7 @@ define <16 x i8> @vsra_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -355,9 +315,7 @@ define <2 x i16> @vsra_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -381,9 +339,7 @@ define <2 x i16> @vsra_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -393,9 +349,7 @@ define <2 x i16> @vsra_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -405,11 +359,7 @@ define <2 x i16> @vsra_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -431,9 +381,7 @@ define <4 x i16> @vsra_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -457,9 +405,7 @@ define <4 x i16> @vsra_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -469,9 +415,7 @@ define <4 x i16> @vsra_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -481,11 +425,7 @@ define <4 x i16> @vsra_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -507,9 +447,7 @@ define <8 x i16> @vsra_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -533,9 +471,7 @@ define <8 x i16> @vsra_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -545,9 +481,7 @@ define <8 x i16> @vsra_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -557,11 +491,7 @@ define <8 x i16> @vsra_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -583,9 +513,7 @@ define <16 x i16> @vsra_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -609,9 +537,7 @@ define <16 x i16> @vsra_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -621,9 +547,7 @@ define <16 x i16> @vsra_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -633,11 +557,7 @@ define <16 x i16> @vsra_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -659,9 +579,7 @@ define <2 x i32> @vsra_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -685,9 +603,7 @@ define <2 x i32> @vsra_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -697,9 +613,7 @@ define <2 x i32> @vsra_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -709,11 +623,7 @@ define <2 x i32> @vsra_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -735,9 +645,7 @@ define <4 x i32> @vsra_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -761,9 +669,7 @@ define <4 x i32> @vsra_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -773,9 +679,7 @@ define <4 x i32> @vsra_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -785,11 +689,7 @@ define <4 x i32> @vsra_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -811,9 +711,7 @@ define <8 x i32> @vsra_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -837,9 +735,7 @@ define <8 x i32> @vsra_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -849,9 +745,7 @@ define <8 x i32> @vsra_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -861,11 +755,7 @@ define <8 x i32> @vsra_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -887,9 +777,7 @@ define <16 x i32> @vsra_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -913,9 +801,7 @@ define <16 x i32> @vsra_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -925,9 +811,7 @@ define <16 x i32> @vsra_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -937,11 +821,7 @@ define <16 x i32> @vsra_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -963,9 +843,7 @@ define <2 x i64> @vsra_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1001,9 +879,7 @@ define <2 x i64> @vsra_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1013,9 +889,7 @@ define <2 x i64> @vsra_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1025,11 +899,7 @@ define <2 x i64> @vsra_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1051,9 +921,7 @@ define <4 x i64> @vsra_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1089,9 +957,7 @@ define <4 x i64> @vsra_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1101,9 +967,7 @@ define <4 x i64> @vsra_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1113,11 +977,7 @@ define <4 x i64> @vsra_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1139,9 +999,7 @@ define <8 x i64> @vsra_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1177,9 +1035,7 @@ define <8 x i64> @vsra_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1189,9 +1045,7 @@ define <8 x i64> @vsra_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1201,11 +1055,7 @@ define <8 x i64> @vsra_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1227,9 +1077,7 @@ define <16 x i64> @vsra_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1265,9 +1113,7 @@ define <16 x i64> @vsra_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1277,9 +1123,7 @@ define <16 x i64> @vsra_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1289,10 +1133,6 @@ define <16 x i64> @vsra_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
index ba3287f744626a..9f9d4af0cc2f3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
@@ -38,9 +38,7 @@ define <2 x i8> @vsrl_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -64,9 +62,7 @@ define <2 x i8> @vsrl_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -76,9 +72,7 @@ define <2 x i8> @vsrl_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 4), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -88,11 +82,7 @@ define <2 x i8> @vsrl_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 4), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -114,9 +104,7 @@ define <4 x i8> @vsrl_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -140,9 +128,7 @@ define <4 x i8> @vsrl_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -152,9 +138,7 @@ define <4 x i8> @vsrl_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 4), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -164,11 +148,7 @@ define <4 x i8> @vsrl_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 4), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -202,9 +182,7 @@ define <8 x i8> @vsrl_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -228,9 +206,7 @@ define <8 x i8> @vsrl_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -240,9 +216,7 @@ define <8 x i8> @vsrl_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -252,11 +226,7 @@ define <8 x i8> @vsrl_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -278,9 +248,7 @@ define <16 x i8> @vsrl_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -304,9 +272,7 @@ define <16 x i8> @vsrl_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -316,9 +282,7 @@ define <16 x i8> @vsrl_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 4), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -328,11 +292,7 @@ define <16 x i8> @vsrl_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 4), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -354,9 +314,7 @@ define <2 x i16> @vsrl_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -380,9 +338,7 @@ define <2 x i16> @vsrl_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -392,9 +348,7 @@ define <2 x i16> @vsrl_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 4), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -404,11 +358,7 @@ define <2 x i16> @vsrl_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 4), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -430,9 +380,7 @@ define <4 x i16> @vsrl_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -456,9 +404,7 @@ define <4 x i16> @vsrl_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -468,9 +414,7 @@ define <4 x i16> @vsrl_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 4), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -480,11 +424,7 @@ define <4 x i16> @vsrl_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 4), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -506,9 +446,7 @@ define <8 x i16> @vsrl_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -532,9 +470,7 @@ define <8 x i16> @vsrl_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -544,9 +480,7 @@ define <8 x i16> @vsrl_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 4), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -556,11 +490,7 @@ define <8 x i16> @vsrl_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 4), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -582,9 +512,7 @@ define <16 x i16> @vsrl_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -608,9 +536,7 @@ define <16 x i16> @vsrl_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -620,9 +546,7 @@ define <16 x i16> @vsrl_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 4), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -632,11 +556,7 @@ define <16 x i16> @vsrl_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 4), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -658,9 +578,7 @@ define <2 x i32> @vsrl_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -684,9 +602,7 @@ define <2 x i32> @vsrl_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -696,9 +612,7 @@ define <2 x i32> @vsrl_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 4), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -708,11 +622,7 @@ define <2 x i32> @vsrl_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 4), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -734,9 +644,7 @@ define <4 x i32> @vsrl_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -760,9 +668,7 @@ define <4 x i32> @vsrl_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -772,9 +678,7 @@ define <4 x i32> @vsrl_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 4), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -784,11 +688,7 @@ define <4 x i32> @vsrl_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 4), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -810,9 +710,7 @@ define <8 x i32> @vsrl_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -836,9 +734,7 @@ define <8 x i32> @vsrl_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -848,9 +744,7 @@ define <8 x i32> @vsrl_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -860,11 +754,7 @@ define <8 x i32> @vsrl_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -886,9 +776,7 @@ define <16 x i32> @vsrl_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -912,9 +800,7 @@ define <16 x i32> @vsrl_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -924,9 +810,7 @@ define <16 x i32> @vsrl_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 4), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -936,11 +820,7 @@ define <16 x i32> @vsrl_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 4), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -962,9 +842,7 @@ define <2 x i64> @vsrl_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1000,9 +878,7 @@ define <2 x i64> @vsrl_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1012,9 +888,7 @@ define <2 x i64> @vsrl_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 4), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1024,11 +898,7 @@ define <2 x i64> @vsrl_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 4), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1050,9 +920,7 @@ define <4 x i64> @vsrl_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1088,9 +956,7 @@ define <4 x i64> @vsrl_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1100,9 +966,7 @@ define <4 x i64> @vsrl_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 4), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1112,11 +976,7 @@ define <4 x i64> @vsrl_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 4), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1138,9 +998,7 @@ define <8 x i64> @vsrl_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1176,9 +1034,7 @@ define <8 x i64> @vsrl_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1188,9 +1044,7 @@ define <8 x i64> @vsrl_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1200,11 +1054,7 @@ define <8 x i64> @vsrl_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1226,9 +1076,7 @@ define <16 x i64> @vsrl_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1264,9 +1112,7 @@ define <16 x i64> @vsrl_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1276,9 +1122,7 @@ define <16 x i64> @vsrl_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 4), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1288,10 +1132,6 @@ define <16 x i64> @vsrl_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 4), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 32b8d10d87173d..2caa2ff41a7d93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -43,9 +43,7 @@ define <2 x i8> @vssub_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -69,9 +67,7 @@ define <2 x i8> @vssub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -82,9 +78,7 @@ define <2 x i8> @vssub_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -95,11 +89,7 @@ define <2 x i8> @vssub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -121,9 +111,7 @@ define <4 x i8> @vssub_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -161,9 +149,7 @@ define <4 x i8> @vssub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -174,9 +160,7 @@ define <4 x i8> @vssub_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -187,11 +171,7 @@ define <4 x i8> @vssub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -213,9 +193,7 @@ define <5 x i8> @vssub_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -239,9 +217,7 @@ define <5 x i8> @vssub_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -252,9 +228,7 @@ define <5 x i8> @vssub_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -265,11 +239,7 @@ define <5 x i8> @vssub_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -291,9 +261,7 @@ define <8 x i8> @vssub_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -317,9 +285,7 @@ define <8 x i8> @vssub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -330,9 +296,7 @@ define <8 x i8> @vssub_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -343,11 +307,7 @@ define <8 x i8> @vssub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -369,9 +329,7 @@ define <16 x i8> @vssub_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -395,9 +353,7 @@ define <16 x i8> @vssub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -408,9 +364,7 @@ define <16 x i8> @vssub_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -421,11 +375,7 @@ define <16 x i8> @vssub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -453,9 +403,7 @@ define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -478,11 +426,7 @@ define <256 x i8> @vssub_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v16, v16, a2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -500,9 +444,7 @@ define <256 x i8> @vssub_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -520,9 +462,7 @@ define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -544,9 +484,7 @@ define <2 x i16> @vssub_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -570,9 +508,7 @@ define <2 x i16> @vssub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -583,9 +519,7 @@ define <2 x i16> @vssub_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -596,11 +530,7 @@ define <2 x i16> @vssub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -622,9 +552,7 @@ define <4 x i16> @vssub_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -648,9 +576,7 @@ define <4 x i16> @vssub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -661,9 +587,7 @@ define <4 x i16> @vssub_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -674,11 +598,7 @@ define <4 x i16> @vssub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -700,9 +620,7 @@ define <8 x i16> @vssub_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -726,9 +644,7 @@ define <8 x i16> @vssub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -739,9 +655,7 @@ define <8 x i16> @vssub_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -752,11 +666,7 @@ define <8 x i16> @vssub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -778,9 +688,7 @@ define <16 x i16> @vssub_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -804,9 +712,7 @@ define <16 x i16> @vssub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -817,9 +723,7 @@ define <16 x i16> @vssub_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -830,11 +734,7 @@ define <16 x i16> @vssub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -856,9 +756,7 @@ define <2 x i32> @vssub_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -882,9 +780,7 @@ define <2 x i32> @vssub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -895,9 +791,7 @@ define <2 x i32> @vssub_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -908,11 +802,7 @@ define <2 x i32> @vssub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -934,9 +824,7 @@ define <4 x i32> @vssub_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -960,9 +848,7 @@ define <4 x i32> @vssub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -973,9 +859,7 @@ define <4 x i32> @vssub_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -986,11 +870,7 @@ define <4 x i32> @vssub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -1012,9 +892,7 @@ define <8 x i32> @vssub_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1038,9 +916,7 @@ define <8 x i32> @vssub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1051,9 +927,7 @@ define <8 x i32> @vssub_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1064,11 +938,7 @@ define <8 x i32> @vssub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1090,9 +960,7 @@ define <16 x i32> @vssub_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1116,9 +984,7 @@ define <16 x i32> @vssub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1129,9 +995,7 @@ define <16 x i32> @vssub_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1142,11 +1006,7 @@ define <16 x i32> @vssub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1168,9 +1028,7 @@ define <2 x i64> @vssub_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1222,9 +1080,7 @@ define <2 x i64> @vssub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1235,9 +1091,7 @@ define <2 x i64> @vssub_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1248,11 +1102,7 @@ define <2 x i64> @vssub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1274,9 +1124,7 @@ define <4 x i64> @vssub_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1328,9 +1176,7 @@ define <4 x i64> @vssub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1341,9 +1187,7 @@ define <4 x i64> @vssub_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1354,11 +1198,7 @@ define <4 x i64> @vssub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1380,9 +1220,7 @@ define <8 x i64> @vssub_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1434,9 +1272,7 @@ define <8 x i64> @vssub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1447,9 +1283,7 @@ define <8 x i64> @vssub_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1460,11 +1294,7 @@ define <8 x i64> @vssub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1486,9 +1316,7 @@ define <16 x i64> @vssub_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1540,9 +1368,7 @@ define <16 x i64> @vssub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1553,9 +1379,7 @@ define <16 x i64> @vssub_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1566,11 +1390,7 @@ define <16 x i64> @vssub_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1582,16 +1402,16 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-LABEL: vssub_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1624,24 +1444,22 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssub.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vssub_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vssub_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1670,11 +1488,7 @@ define <32 x i64> @vssub_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vssub.vx v16, v16, a2
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1706,9 +1520,7 @@ define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1738,8 +1550,6 @@ define <32 x i64> @vssub_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
index 941be4aba1b9fc..efe28eb9021ce2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
@@ -35,9 +35,7 @@ define <2 x i8> @ssub_v2i8_vi(<2 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
+  %v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 1))
   ret <2 x i8> %v
 }
 
@@ -72,9 +70,7 @@ define <4 x i8> @ssub_v4i8_vi(<4 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
+  %v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 1))
   ret <4 x i8> %v
 }
 
@@ -109,9 +105,7 @@ define <8 x i8> @ssub_v8i8_vi(<8 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
+  %v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 1))
   ret <8 x i8> %v
 }
 
@@ -146,9 +140,7 @@ define <16 x i8> @ssub_v16i8_vi(<16 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
+  %v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 1))
   ret <16 x i8> %v
 }
 
@@ -183,9 +175,7 @@ define <2 x i16> @ssub_v2i16_vi(<2 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
+  %v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 1))
   ret <2 x i16> %v
 }
 
@@ -220,9 +210,7 @@ define <4 x i16> @ssub_v4i16_vi(<4 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
+  %v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 1))
   ret <4 x i16> %v
 }
 
@@ -257,9 +245,7 @@ define <8 x i16> @ssub_v8i16_vi(<8 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
+  %v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 1))
   ret <8 x i16> %v
 }
 
@@ -294,9 +280,7 @@ define <16 x i16> @ssub_v16i16_vi(<16 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
+  %v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 1))
   ret <16 x i16> %v
 }
 
@@ -331,9 +315,7 @@ define <2 x i32> @ssub_v2i32_vi(<2 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
+  %v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 1))
   ret <2 x i32> %v
 }
 
@@ -368,9 +350,7 @@ define <4 x i32> @ssub_v4i32_vi(<4 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
+  %v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 1))
   ret <4 x i32> %v
 }
 
@@ -405,9 +385,7 @@ define <8 x i32> @ssub_v8i32_vi(<8 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
+  %v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 1))
   ret <8 x i32> %v
 }
 
@@ -442,9 +420,7 @@ define <16 x i32> @ssub_v16i32_vi(<16 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
+  %v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 1))
   ret <16 x i32> %v
 }
 
@@ -492,9 +468,7 @@ define <2 x i64> @ssub_v2i64_vi(<2 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
+  %v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 1))
   ret <2 x i64> %v
 }
 
@@ -542,9 +516,7 @@ define <4 x i64> @ssub_v4i64_vi(<4 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
+  %v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 1))
   ret <4 x i64> %v
 }
 
@@ -592,9 +564,7 @@ define <8 x i64> @ssub_v8i64_vi(<8 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
+  %v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 1))
   ret <8 x i64> %v
 }
 
@@ -642,8 +612,6 @@ define <16 x i64> @ssub_v16i64_vi(<16 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
+  %v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 1))
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 60c16ef543a0ff..6313f31bc1a615 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -38,9 +38,7 @@ define <2 x i8> @vssubu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -64,9 +62,7 @@ define <2 x i8> @vssubu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -77,9 +73,7 @@ define <2 x i8> @vssubu_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -90,11 +84,7 @@ define <2 x i8> @vssubu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -116,9 +106,7 @@ define <4 x i8> @vssubu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -156,9 +144,7 @@ define <4 x i8> @vssubu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -169,9 +155,7 @@ define <4 x i8> @vssubu_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -182,11 +166,7 @@ define <4 x i8> @vssubu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -208,9 +188,7 @@ define <5 x i8> @vssubu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -234,9 +212,7 @@ define <5 x i8> @vssubu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -247,9 +223,7 @@ define <5 x i8> @vssubu_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -260,11 +234,7 @@ define <5 x i8> @vssubu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %head = insertelement <5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -286,9 +256,7 @@ define <8 x i8> @vssubu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -312,9 +280,7 @@ define <8 x i8> @vssubu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -325,9 +291,7 @@ define <8 x i8> @vssubu_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -338,11 +302,7 @@ define <8 x i8> @vssubu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -364,9 +324,7 @@ define <16 x i8> @vssubu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -390,9 +348,7 @@ define <16 x i8> @vssubu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -403,9 +359,7 @@ define <16 x i8> @vssubu_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -416,11 +370,7 @@ define <16 x i8> @vssubu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -448,9 +398,7 @@ define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -473,11 +421,7 @@ define <256 x i8> @vssubu_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v16, v16, a2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %head = insertelement <256 x i1> poison, i1 true, i32 0
-  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -495,9 +439,7 @@ define <256 x i8> @vssubu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -515,9 +457,7 @@ define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -539,9 +479,7 @@ define <2 x i16> @vssubu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -565,9 +503,7 @@ define <2 x i16> @vssubu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -578,9 +514,7 @@ define <2 x i16> @vssubu_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -591,11 +525,7 @@ define <2 x i16> @vssubu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -617,9 +547,7 @@ define <4 x i16> @vssubu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -643,9 +571,7 @@ define <4 x i16> @vssubu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -656,9 +582,7 @@ define <4 x i16> @vssubu_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -669,11 +593,7 @@ define <4 x i16> @vssubu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -695,9 +615,7 @@ define <8 x i16> @vssubu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -721,9 +639,7 @@ define <8 x i16> @vssubu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -734,9 +650,7 @@ define <8 x i16> @vssubu_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -747,11 +661,7 @@ define <8 x i16> @vssubu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -773,9 +683,7 @@ define <16 x i16> @vssubu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -799,9 +707,7 @@ define <16 x i16> @vssubu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -812,9 +718,7 @@ define <16 x i16> @vssubu_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -825,11 +729,7 @@ define <16 x i16> @vssubu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -851,9 +751,7 @@ define <2 x i32> @vssubu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -877,9 +775,7 @@ define <2 x i32> @vssubu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -890,9 +786,7 @@ define <2 x i32> @vssubu_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -903,11 +797,7 @@ define <2 x i32> @vssubu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -929,9 +819,7 @@ define <4 x i32> @vssubu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -955,9 +843,7 @@ define <4 x i32> @vssubu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -968,9 +854,7 @@ define <4 x i32> @vssubu_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -981,11 +865,7 @@ define <4 x i32> @vssubu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -1007,9 +887,7 @@ define <8 x i32> @vssubu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1033,9 +911,7 @@ define <8 x i32> @vssubu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1046,9 +922,7 @@ define <8 x i32> @vssubu_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1059,11 +933,7 @@ define <8 x i32> @vssubu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1085,9 +955,7 @@ define <16 x i32> @vssubu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1111,9 +979,7 @@ define <16 x i32> @vssubu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1124,9 +990,7 @@ define <16 x i32> @vssubu_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1137,11 +1001,7 @@ define <16 x i32> @vssubu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1163,9 +1023,7 @@ define <2 x i64> @vssubu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1217,9 +1075,7 @@ define <2 x i64> @vssubu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1230,9 +1086,7 @@ define <2 x i64> @vssubu_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1243,11 +1097,7 @@ define <2 x i64> @vssubu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1269,9 +1119,7 @@ define <4 x i64> @vssubu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1323,9 +1171,7 @@ define <4 x i64> @vssubu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1336,9 +1182,7 @@ define <4 x i64> @vssubu_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1349,11 +1193,7 @@ define <4 x i64> @vssubu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1375,9 +1215,7 @@ define <8 x i64> @vssubu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1429,9 +1267,7 @@ define <8 x i64> @vssubu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1442,9 +1278,7 @@ define <8 x i64> @vssubu_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1455,11 +1289,7 @@ define <8 x i64> @vssubu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1481,9 +1311,7 @@ define <16 x i64> @vssubu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1535,9 +1363,7 @@ define <16 x i64> @vssubu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1548,9 +1374,7 @@ define <16 x i64> @vssubu_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1561,11 +1385,7 @@ define <16 x i64> @vssubu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1577,16 +1397,16 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
 ; RV32-LABEL: vssubu_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
+; RV32-NEXT:    vslidedown.vi v7, v0, 2
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1619,24 +1439,22 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssubu.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vssubu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vssubu_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a1, 32
-; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
+; RV32-NEXT:    li a2, 32
+; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1665,11 +1483,7 @@ define <32 x i64> @vssubu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vssubu.vx v16, v16, a2
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %head = insertelement <32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1701,9 +1515,7 @@ define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1733,8 +1545,6 @@ define <32 x i64> @vssubu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
-  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
index f31ee23fe7dcd1..dc9279f6e7fa09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
@@ -35,9 +35,7 @@ define <2 x i8> @usub_v2i8_vi(<2 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb)
+  %v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 2))
   ret <2 x i8> %v
 }
 
@@ -72,9 +70,7 @@ define <4 x i8> @usub_v4i8_vi(<4 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb)
+  %v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 2))
   ret <4 x i8> %v
 }
 
@@ -109,9 +105,7 @@ define <8 x i8> @usub_v8i8_vi(<8 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb)
+  %v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 2))
   ret <8 x i8> %v
 }
 
@@ -146,9 +140,7 @@ define <16 x i8> @usub_v16i8_vi(<16 x i8> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb)
+  %v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 2))
   ret <16 x i8> %v
 }
 
@@ -183,9 +175,7 @@ define <2 x i16> @usub_v2i16_vi(<2 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb)
+  %v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 2))
   ret <2 x i16> %v
 }
 
@@ -220,9 +210,7 @@ define <4 x i16> @usub_v4i16_vi(<4 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb)
+  %v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 2))
   ret <4 x i16> %v
 }
 
@@ -257,9 +245,7 @@ define <8 x i16> @usub_v8i16_vi(<8 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb)
+  %v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 2))
   ret <8 x i16> %v
 }
 
@@ -294,9 +280,7 @@ define <16 x i16> @usub_v16i16_vi(<16 x i16> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb)
+  %v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 2))
   ret <16 x i16> %v
 }
 
@@ -331,9 +315,7 @@ define <2 x i32> @usub_v2i32_vi(<2 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb)
+  %v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 2))
   ret <2 x i32> %v
 }
 
@@ -368,9 +350,7 @@ define <4 x i32> @usub_v4i32_vi(<4 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb)
+  %v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 2))
   ret <4 x i32> %v
 }
 
@@ -405,9 +385,7 @@ define <8 x i32> @usub_v8i32_vi(<8 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb)
+  %v = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 2))
   ret <8 x i32> %v
 }
 
@@ -442,9 +420,7 @@ define <16 x i32> @usub_v16i32_vi(<16 x i32> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb)
+  %v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 2))
   ret <16 x i32> %v
 }
 
@@ -492,9 +468,7 @@ define <2 x i64> @usub_v2i64_vi(<2 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb)
+  %v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 2))
   ret <2 x i64> %v
 }
 
@@ -542,9 +516,7 @@ define <4 x i64> @usub_v4i64_vi(<4 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb)
+  %v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 2))
   ret <4 x i64> %v
 }
 
@@ -592,9 +564,7 @@ define <8 x i64> @usub_v8i64_vi(<8 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb)
+  %v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 2))
   ret <8 x i64> %v
 }
 
@@ -642,8 +612,6 @@ define <16 x i64> @usub_v16i64_vi(<16 x i64> %va) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb)
+  %v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 2))
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
index 1c12614940082f..6052c9ee20fe10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
@@ -34,9 +34,7 @@ define <2 x i8> @vsub_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -60,9 +58,7 @@ define <2 x i8> @vsub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -84,9 +80,7 @@ define <3 x i8> @vsub_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <3 x i1> %head, <3 x i1> poison, <3 x i32> zeroinitializer
-  %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl)
+  %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> splat (i1 true), i32 %evl)
   ret <3 x i8> %v
 }
 
@@ -110,9 +104,7 @@ define <3 x i8> @vsub_vx_v3i8_unmasked(<3 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <3 x i8> %elt.head, <3 x i8> poison, <3 x i32> zeroinitializer
-  %head = insertelement <3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <3 x i1> %head, <3 x i1> poison, <3 x i32> zeroinitializer
-  %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %vb, <3 x i1> %m, i32 %evl)
+  %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %vb, <3 x i1> splat (i1 true), i32 %evl)
   ret <3 x i8> %v
 }
 
@@ -134,9 +126,7 @@ define <4 x i8> @vsub_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -160,9 +150,7 @@ define <4 x i8> @vsub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -184,9 +172,7 @@ define <8 x i8> @vsub_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -210,9 +196,7 @@ define <8 x i8> @vsub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -234,9 +218,7 @@ define <16 x i8> @vsub_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -260,9 +242,7 @@ define <16 x i8> @vsub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -284,9 +264,7 @@ define <2 x i16> @vsub_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -310,9 +288,7 @@ define <2 x i16> @vsub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -334,9 +310,7 @@ define <4 x i16> @vsub_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -360,9 +334,7 @@ define <4 x i16> @vsub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -384,9 +356,7 @@ define <8 x i16> @vsub_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -410,9 +380,7 @@ define <8 x i16> @vsub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -434,9 +402,7 @@ define <16 x i16> @vsub_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -460,9 +426,7 @@ define <16 x i16> @vsub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -484,9 +448,7 @@ define <2 x i32> @vsub_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -510,9 +472,7 @@ define <2 x i32> @vsub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -534,9 +494,7 @@ define <4 x i32> @vsub_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -560,9 +518,7 @@ define <4 x i32> @vsub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -584,9 +540,7 @@ define <8 x i32> @vsub_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -610,9 +564,7 @@ define <8 x i32> @vsub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -634,9 +586,7 @@ define <16 x i32> @vsub_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -660,9 +610,7 @@ define <16 x i32> @vsub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -684,9 +632,7 @@ define <2 x i64> @vsub_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -738,9 +684,7 @@ define <2 x i64> @vsub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -762,9 +706,7 @@ define <4 x i64> @vsub_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -816,9 +758,7 @@ define <4 x i64> @vsub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -840,9 +780,7 @@ define <8 x i64> @vsub_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -894,9 +832,7 @@ define <8 x i64> @vsub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -918,9 +854,7 @@ define <16 x i64> @vsub_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -972,8 +906,6 @@ define <16 x i64> @vsub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
index 68cffa98cd3d4b..16487a0784125e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
@@ -34,9 +34,7 @@ define <2 x i8> @vxor_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -72,9 +70,7 @@ define <2 x i8> @vxor_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -84,9 +80,7 @@ define <2 x i8> @vxor_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 7), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -96,11 +90,7 @@ define <2 x i8> @vxor_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 7), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -110,9 +100,7 @@ define <2 x i8> @vxor_vi_v2i8_1(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -122,11 +110,7 @@ define <2 x i8> @vxor_vi_v2i8_unmasked_1(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -148,9 +132,7 @@ define <4 x i8> @vxor_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -174,9 +156,7 @@ define <4 x i8> @vxor_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -186,9 +166,7 @@ define <4 x i8> @vxor_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 7), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -198,11 +176,7 @@ define <4 x i8> @vxor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 7), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -212,9 +186,7 @@ define <4 x i8> @vxor_vi_v4i8_1(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -224,11 +196,7 @@ define <4 x i8> @vxor_vi_v4i8_unmasked_1(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -250,9 +218,7 @@ define <8 x i8> @vxor_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -276,9 +242,7 @@ define <8 x i8> @vxor_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -288,9 +252,7 @@ define <8 x i8> @vxor_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 7), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -300,11 +262,7 @@ define <8 x i8> @vxor_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 7), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -314,9 +272,7 @@ define <8 x i8> @vxor_vi_v8i8_1(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -326,11 +282,7 @@ define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -352,9 +304,7 @@ define <9 x i8> @vxor_vv_v9i8_unmasked(<9 x i8> %va, <9 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <9 x i1> poison, i1 true, i32 0
-  %m = shufflevector <9 x i1> %head, <9 x i1> poison, <9 x i32> zeroinitializer
-  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl)
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> splat (i1 true), i32 %evl)
   ret <9 x i8> %v
 }
 
@@ -378,9 +328,7 @@ define <9 x i8> @vxor_vx_v9i8_unmasked(<9 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <9 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
-  %head = insertelement <9 x i1> poison, i1 true, i32 0
-  %m = shufflevector <9 x i1> %head, <9 x i1> poison, <9 x i32> zeroinitializer
-  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> splat (i1 true), i32 %evl)
   ret <9 x i8> %v
 }
 
@@ -390,9 +338,7 @@ define <9 x i8> @vxor_vi_v9i8(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <9 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
-  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 7), <9 x i1> %m, i32 %evl)
   ret <9 x i8> %v
 }
 
@@ -402,11 +348,7 @@ define <9 x i8> @vxor_vi_v9i8_unmasked(<9 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <9 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
-  %head = insertelement <9 x i1> poison, i1 true, i32 0
-  %m = shufflevector <9 x i1> %head, <9 x i1> poison, <9 x i32> zeroinitializer
-  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 7), <9 x i1> splat (i1 true), i32 %evl)
   ret <9 x i8> %v
 }
 
@@ -416,9 +358,7 @@ define <9 x i8> @vxor_vi_v9i8_1(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <9 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
-  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 -1), <9 x i1> %m, i32 %evl)
   ret <9 x i8> %v
 }
 
@@ -428,11 +368,7 @@ define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <9 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
-  %head = insertelement <9 x i1> poison, i1 true, i32 0
-  %m = shufflevector <9 x i1> %head, <9 x i1> poison, <9 x i32> zeroinitializer
-  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 -1), <9 x i1> splat (i1 true), i32 %evl)
   ret <9 x i8> %v
 }
 
@@ -454,9 +390,7 @@ define <16 x i8> @vxor_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -480,9 +414,7 @@ define <16 x i8> @vxor_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -492,9 +424,7 @@ define <16 x i8> @vxor_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 7), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -504,11 +434,7 @@ define <16 x i8> @vxor_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 7), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -518,9 +444,7 @@ define <16 x i8> @vxor_vi_v16i8_1(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -530,11 +454,7 @@ define <16 x i8> @vxor_vi_v16i8_unmasked_1(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -556,9 +476,7 @@ define <2 x i16> @vxor_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -582,9 +500,7 @@ define <2 x i16> @vxor_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -594,9 +510,7 @@ define <2 x i16> @vxor_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 7), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -606,11 +520,7 @@ define <2 x i16> @vxor_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 7), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -620,9 +530,7 @@ define <2 x i16> @vxor_vi_v2i16_1(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -632,11 +540,7 @@ define <2 x i16> @vxor_vi_v2i16_unmasked_1(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -658,9 +562,7 @@ define <4 x i16> @vxor_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -684,9 +586,7 @@ define <4 x i16> @vxor_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -696,9 +596,7 @@ define <4 x i16> @vxor_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 7), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -708,11 +606,7 @@ define <4 x i16> @vxor_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 7), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -722,9 +616,7 @@ define <4 x i16> @vxor_vi_v4i16_1(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -734,11 +626,7 @@ define <4 x i16> @vxor_vi_v4i16_unmasked_1(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -760,9 +648,7 @@ define <8 x i16> @vxor_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -786,9 +672,7 @@ define <8 x i16> @vxor_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -798,9 +682,7 @@ define <8 x i16> @vxor_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 7), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -810,11 +692,7 @@ define <8 x i16> @vxor_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 7), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -824,9 +702,7 @@ define <8 x i16> @vxor_vi_v8i16_1(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -836,11 +712,7 @@ define <8 x i16> @vxor_vi_v8i16_unmasked_1(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -862,9 +734,7 @@ define <16 x i16> @vxor_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -888,9 +758,7 @@ define <16 x i16> @vxor_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -900,9 +768,7 @@ define <16 x i16> @vxor_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 7), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -912,11 +778,7 @@ define <16 x i16> @vxor_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 7), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -926,9 +788,7 @@ define <16 x i16> @vxor_vi_v16i16_1(<16 x i16> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -938,11 +798,7 @@ define <16 x i16> @vxor_vi_v16i16_unmasked_1(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -964,9 +820,7 @@ define <2 x i32> @vxor_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -990,9 +844,7 @@ define <2 x i32> @vxor_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -1002,9 +854,7 @@ define <2 x i32> @vxor_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 7), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -1014,11 +864,7 @@ define <2 x i32> @vxor_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 7), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -1028,9 +874,7 @@ define <2 x i32> @vxor_vi_v2i32_1(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -1040,11 +884,7 @@ define <2 x i32> @vxor_vi_v2i32_unmasked_1(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -1066,9 +906,7 @@ define <4 x i32> @vxor_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -1092,9 +930,7 @@ define <4 x i32> @vxor_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -1104,9 +940,7 @@ define <4 x i32> @vxor_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 7), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -1116,11 +950,7 @@ define <4 x i32> @vxor_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 7), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -1130,9 +960,7 @@ define <4 x i32> @vxor_vi_v4i32_1(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -1142,11 +970,7 @@ define <4 x i32> @vxor_vi_v4i32_unmasked_1(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -1168,9 +992,7 @@ define <8 x i32> @vxor_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1194,9 +1016,7 @@ define <8 x i32> @vxor_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1206,9 +1026,7 @@ define <8 x i32> @vxor_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 7), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1218,11 +1036,7 @@ define <8 x i32> @vxor_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 7), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1232,9 +1046,7 @@ define <8 x i32> @vxor_vi_v8i32_1(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1244,11 +1056,7 @@ define <8 x i32> @vxor_vi_v8i32_unmasked_1(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -1270,9 +1078,7 @@ define <16 x i32> @vxor_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1296,9 +1102,7 @@ define <16 x i32> @vxor_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1308,9 +1112,7 @@ define <16 x i32> @vxor_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 7), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1320,11 +1122,7 @@ define <16 x i32> @vxor_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 7), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1334,9 +1132,7 @@ define <16 x i32> @vxor_vi_v16i32_1(<16 x i32> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1346,11 +1142,7 @@ define <16 x i32> @vxor_vi_v16i32_unmasked_1(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1372,9 +1164,7 @@ define <2 x i64> @vxor_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1426,9 +1216,7 @@ define <2 x i64> @vxor_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1438,9 +1226,7 @@ define <2 x i64> @vxor_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 7), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1450,11 +1236,7 @@ define <2 x i64> @vxor_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 7), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1464,9 +1246,7 @@ define <2 x i64> @vxor_vi_v2i64_1(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1476,11 +1256,7 @@ define <2 x i64> @vxor_vi_v2i64_unmasked_1(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %head = insertelement <2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1502,9 +1278,7 @@ define <4 x i64> @vxor_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1556,9 +1330,7 @@ define <4 x i64> @vxor_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1568,9 +1340,7 @@ define <4 x i64> @vxor_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 7), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1580,11 +1350,7 @@ define <4 x i64> @vxor_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 7), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1594,9 +1360,7 @@ define <4 x i64> @vxor_vi_v4i64_1(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1606,11 +1370,7 @@ define <4 x i64> @vxor_vi_v4i64_unmasked_1(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %head = insertelement <4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1632,9 +1392,7 @@ define <8 x i64> @vxor_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1686,9 +1444,7 @@ define <8 x i64> @vxor_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1698,9 +1454,7 @@ define <8 x i64> @vxor_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 7), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1710,11 +1464,7 @@ define <8 x i64> @vxor_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 7), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1724,9 +1474,7 @@ define <8 x i64> @vxor_vi_v8i64_1(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1736,11 +1484,7 @@ define <8 x i64> @vxor_vi_v8i64_unmasked_1(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %head = insertelement <8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1762,9 +1506,7 @@ define <16 x i64> @vxor_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1816,9 +1558,7 @@ define <16 x i64> @vxor_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1828,9 +1568,7 @@ define <16 x i64> @vxor_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 7), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1840,11 +1578,7 @@ define <16 x i64> @vxor_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 7), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1854,9 +1588,7 @@ define <16 x i64> @vxor_vi_v16i64_1(<16 x i64> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1866,10 +1598,6 @@ define <16 x i64> @vxor_vi_v16i64_unmasked_1(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %head = insertelement <16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll
index d734966830480c..d292978c1d5eb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll
@@ -22,7 +22,7 @@ define <4 x i16> @vzext_v4i16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.zext.v4i16.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.zext.v4i16.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -46,7 +46,7 @@ define <4 x i32> @vzext_v4i32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.zext.v4i32.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.zext.v4i32.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -70,6 +70,6 @@ define <4 x i64> @vzext_v4i64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i1(<4 x i1> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i1(<4 x i1> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
index e99a9b800d76d1..f4d679cd57cac9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
@@ -22,7 +22,7 @@ define <4 x i16> @vzext_v4i16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.zext.v4i16.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i16> @llvm.vp.zext.v4i16.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -46,7 +46,7 @@ define <4 x i32> @vzext_v4i32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vzext.vf4 v9, v8
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.zext.v4i32.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.zext.v4i32.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -70,7 +70,7 @@ define <4 x i64> @vzext_v4i64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vzext.vf8 v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i8(<4 x i8> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i8(<4 x i8> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -94,7 +94,7 @@ define <4 x i32> @vzext_v4i32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vzext.vf2 v9, v8
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.zext.v4i32.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i32> @llvm.vp.zext.v4i32.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -118,7 +118,7 @@ define <4 x i64> @vzext_v4i64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vzext.vf4 v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i16(<4 x i16> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -142,7 +142,7 @@ define <4 x i64> @vzext_v4i64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
+  %v = call <4 x i64> @llvm.vp.zext.v4i64.v4i32(<4 x i32> %va, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -197,7 +197,7 @@ define <32 x i64> @vzext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
+  %v = call <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index d1cd82d9f7c18f..9c4706b2bda764 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -42,9 +42,7 @@ define <vscale x 1 x half> @vp_floor_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -86,9 +84,7 @@ define <vscale x 2 x half> @vp_floor_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -130,9 +126,7 @@ define <vscale x 4 x half> @vp_floor_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -176,9 +170,7 @@ define <vscale x 8 x half> @vp_floor_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -222,9 +214,7 @@ define <vscale x 16 x half> @vp_floor_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -268,9 +258,7 @@ define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -312,9 +300,7 @@ define <vscale x 1 x float> @vp_floor_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.floor.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.floor.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -356,9 +342,7 @@ define <vscale x 2 x float> @vp_floor_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.floor.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.floor.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -402,9 +386,7 @@ define <vscale x 4 x float> @vp_floor_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.floor.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.floor.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -448,9 +430,7 @@ define <vscale x 8 x float> @vp_floor_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.floor.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.floor.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -494,9 +474,7 @@ define <vscale x 16 x float> @vp_floor_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.floor.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.floor.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -538,9 +516,7 @@ define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.floor.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.floor.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -584,9 +560,7 @@ define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -630,9 +604,7 @@ define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -676,9 +648,7 @@ define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -722,9 +692,7 @@ define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -837,8 +805,6 @@ define <vscale x 16 x double> @vp_floor_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index 7774e3e4775af8..b78b8663eac90b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -76,9 +76,7 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.maximum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.maximum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -150,9 +148,7 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.maximum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.maximum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -226,9 +222,7 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.maximum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.maximum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -304,9 +298,7 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.maximum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.maximum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -408,9 +400,7 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -626,8 +616,6 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 5
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -635,6 +623,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v7, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -715,9 +705,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.maximum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.maximum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -751,9 +739,7 @@ define <vscale x 1 x float> @vfmax_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.maximum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.maximum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -787,9 +773,7 @@ define <vscale x 2 x float> @vfmax_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.maximum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.maximum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -825,9 +809,7 @@ define <vscale x 4 x float> @vfmax_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.maximum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.maximum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -863,9 +845,7 @@ define <vscale x 8 x float> @vfmax_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.maximum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.maximum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -899,9 +879,7 @@ define <vscale x 1 x double> @vfmax_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.maximum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.maximum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -937,9 +915,7 @@ define <vscale x 2 x double> @vfmax_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.maximum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.maximum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -975,9 +951,7 @@ define <vscale x 4 x double> @vfmax_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.maximum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.maximum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1026,9 +1000,7 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.maximum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.maximum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1238,8 +1210,6 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.maximum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.maximum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 4e98d0581f896d..69c76152910e86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -76,9 +76,7 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -150,9 +148,7 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -226,9 +222,7 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -304,9 +298,7 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -408,9 +400,7 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -626,8 +616,6 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 5
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -635,6 +623,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v7, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -715,9 +705,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -751,9 +739,7 @@ define <vscale x 1 x float> @vfmin_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -787,9 +773,7 @@ define <vscale x 2 x float> @vfmin_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -825,9 +809,7 @@ define <vscale x 4 x float> @vfmin_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -863,9 +845,7 @@ define <vscale x 8 x float> @vfmin_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -899,9 +879,7 @@ define <vscale x 1 x double> @vfmin_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -937,9 +915,7 @@ define <vscale x 2 x double> @vfmin_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -975,9 +951,7 @@ define <vscale x 4 x double> @vfmin_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1026,9 +1000,7 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1238,8 +1210,6 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll
index f773de3b518c6d..1d4d554d3a47d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-vp-fadd-and-vp-fmul.ll
@@ -38,9 +38,7 @@ define <vscale x 1 x double> @fma_true(<vscale x 1 x double> %x, <vscale x 1 x d
 ; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %true = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %true, i32 %vl)
+  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> splat (i1 true), i32 %vl)
   %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
   ret <vscale x 1 x double> %2
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
index 9e2a33f54f420f..f8c1d5e45bc285 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
@@ -258,8 +258,6 @@ define <vscale x 2 x i8> @masked_load_allones_mask(ptr %a, <vscale x 2 x i8> %ma
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %insert, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %maskedoff)
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> splat (i1 1), <vscale x 2 x i8> %maskedoff)
   ret <vscale x 2 x i8> %load
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
index a2fec5ab0798aa..32414feab722ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
@@ -258,8 +258,6 @@ define void @masked_store_allones_mask(<vscale x 2 x i8> %val, ptr %a) nounwind
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %insert = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %insert, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  call void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.v2i8.p0(<vscale x 2 x i8> %val, ptr %a, i32 1, <vscale x 2 x i1> splat (i1 1))
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 0b236f6d3ff388..e12f1cf7603b84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -206,9 +206,7 @@ define <vscale x 4 x i8> @mgather_truemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vsc
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mtrue, <vscale x 4 x i8> %passthru)
+  %v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i8> %passthru)
   ret <vscale x 4 x i8> %v
 }
 
@@ -429,9 +427,7 @@ define <vscale x 4 x i16> @mgather_truemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <v
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mtrue, <vscale x 4 x i16> %passthru)
+  %v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i16> %passthru)
   ret <vscale x 4 x i16> %v
 }
 
@@ -675,9 +671,7 @@ define <vscale x 4 x i32> @mgather_truemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <v
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mtrue, <vscale x 4 x i32> %passthru)
+  %v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i32> %passthru)
   ret <vscale x 4 x i32> %v
 }
 
@@ -940,9 +934,7 @@ define <vscale x 4 x i64> @mgather_truemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <v
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vluxei64.v v8, (zero), v8
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> %mtrue, <vscale x 4 x i64> %passthru)
+  %v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i64> %passthru)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1340,9 +1332,7 @@ define <vscale x 4 x half> @mgather_truemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mtrue, <vscale x 4 x half> %passthru)
+  %v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x half> %passthru)
   ret <vscale x 4 x half> %v
 }
 
@@ -1542,9 +1532,7 @@ define <vscale x 4 x float> @mgather_truemask_nxv4f32(<vscale x 4 x ptr> %ptrs,
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mtrue, <vscale x 4 x float> %passthru)
+  %v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x float> %passthru)
   ret <vscale x 4 x float> %v
 }
 
@@ -1807,9 +1795,7 @@ define <vscale x 4 x double> @mgather_truemask_nxv4f64(<vscale x 4 x ptr> %ptrs,
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vluxei64.v v8, (zero), v8
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> %mtrue, <vscale x 4 x double> %passthru)
+  %v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x double> %passthru)
   ret <vscale x 4 x double> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 652e7a128a9606..0e09f59b6a20fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -139,9 +139,7 @@ define void @mscatter_truemask_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x ptr>
 ; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> %val, <vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mtrue)
+  call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> %val, <vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> splat (i1 1))
   ret void
 }
 
@@ -300,9 +298,7 @@ define void @mscatter_truemask_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x pt
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> %val, <vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mtrue)
+  call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> %val, <vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1))
   ret void
 }
 
@@ -499,9 +495,7 @@ define void @mscatter_truemask_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x pt
 ; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> %val, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mtrue)
+  call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> %val, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> splat (i1 1))
   ret void
 }
 
@@ -737,9 +731,7 @@ define void @mscatter_truemask_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x pt
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> %val, <vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> %mtrue)
+  call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> %val, <vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> splat (i1 1))
   ret void
 }
 
@@ -1041,9 +1033,7 @@ define void @mscatter_truemask_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x p
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> %val, <vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mtrue)
+  call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> %val, <vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1))
   ret void
 }
 
@@ -1221,9 +1211,7 @@ define void @mscatter_truemask_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x
 ; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mtrue)
+  call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> splat (i1 1))
   ret void
 }
 
@@ -1459,9 +1447,7 @@ define void @mscatter_truemask_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x
 ; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.masked.scatter.nxv4f64.nxv4p0(<vscale x 4 x double> %val, <vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> %mtrue)
+  call void @llvm.masked.scatter.nxv4f64.nxv4p0(<vscale x 4 x double> %val, <vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> splat (i1 1))
   ret void
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll b/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll
index f608a63d6bb9b1..e47517abacb4d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll
@@ -18,9 +18,7 @@ define <vscale x 4 x i32> @test_vloxei(ptr %ptr, <vscale x 4 x i8> %offset, i64
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -40,9 +38,7 @@ define <vscale x 4 x i32> @test_vloxei2(ptr %ptr, <vscale x 4 x i8> %offset, i64
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 14, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 14)
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -62,9 +58,7 @@ define <vscale x 4 x i32> @test_vloxei3(ptr %ptr, <vscale x 4 x i8> %offset, i64
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 26, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 26)
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -87,9 +81,7 @@ define <vscale x 4 x i32> @test_vloxei4(ptr %ptr, <vscale x 4 x i8> %offset, <vs
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = call <vscale x 4 x i64> @llvm.vp.zext.nxvi64.nxv1i8(<vscale x 4 x i8> %offset, <vscale x 4 x i1> %m, i32 %vl)
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   %vl.i64 = zext i32 %vl to i64
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
@@ -116,9 +108,7 @@ define <vscale x 4 x i32> @test_vloxei5(ptr %ptr, <vscale x 4 x i8> %offset, i64
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i16>
-  %shamt = insertelement <vscale x 4 x i16> undef, i16 12, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i16> %shamt, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i16> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i16> %offset.ext, splat (i16 12)
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -141,9 +131,7 @@ define <vscale x 4 x i32> @test_vloxei6(ptr %ptr, <vscale x 4 x i7> %offset, i64
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i7> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -164,9 +152,7 @@ define <vscale x 4 x i32> @test_vloxei7(ptr %ptr, <vscale x 4 x i1> %offset, i64
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i1> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 2, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 2)
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -194,9 +180,7 @@ define <vscale x 4 x i32> @test_vloxei_mask(ptr %ptr, <vscale x 4 x i8> %offset,
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   %res = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -223,9 +207,7 @@ define <vscale x 4 x i32> @test_vluxei(ptr %ptr, <vscale x 4 x i8> %offset, i64
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   %res = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -253,9 +235,7 @@ define <vscale x 4 x i32> @test_vluxei_mask(ptr %ptr, <vscale x 4 x i8> %offset,
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   %res = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     ptr %ptr,
@@ -282,9 +262,7 @@ define void @test_vsoxei(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i8> %o
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %val,
     ptr %ptr,
@@ -311,9 +289,7 @@ define void @test_vsoxei_mask(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %val,
     ptr %ptr,
@@ -340,9 +316,7 @@ define void @test_vsuxei(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i8> %o
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %val,
     ptr %ptr,
@@ -369,9 +343,7 @@ define void @test_vsuxei_mask(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i
 ; CHECK-NEXT:    ret
 entry:
   %offset.ext = zext <vscale x 4 x i8> %offset to <vscale x 4 x i64>
-  %shamt = insertelement <vscale x 4 x i64> undef, i64 4, i32 0
-  %shamt.vec = shufflevector <vscale x 4 x i64> %shamt, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %shl = shl <vscale x 4 x i64> %offset.ext, %shamt.vec
+  %shl = shl <vscale x 4 x i64> %offset.ext, splat (i64 4)
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %val,
     ptr %ptr,
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 126836cd9390b0..8bc2334282653f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -86,9 +86,7 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16_unmasked(<vscale x 1 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.nearbyint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.nearbyint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16_unmasked(<vscale x 2 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.nearbyint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.nearbyint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16_unmasked(<vscale x 4 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.nearbyint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.nearbyint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16_unmasked(<vscale x 8 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -432,9 +424,7 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16_unmasked(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -554,8 +544,6 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -563,6 +551,8 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -609,9 +599,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -653,9 +641,7 @@ define <vscale x 1 x float> @vp_nearbyint_nxv1f32_unmasked(<vscale x 1 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.nearbyint.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.nearbyint.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -697,9 +683,7 @@ define <vscale x 2 x float> @vp_nearbyint_nxv2f32_unmasked(<vscale x 2 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.nearbyint.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.nearbyint.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -743,9 +727,7 @@ define <vscale x 4 x float> @vp_nearbyint_nxv4f32_unmasked(<vscale x 4 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.nearbyint.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.nearbyint.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -789,9 +771,7 @@ define <vscale x 8 x float> @vp_nearbyint_nxv8f32_unmasked(<vscale x 8 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.nearbyint.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.nearbyint.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -835,9 +815,7 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32_unmasked(<vscale x 16 x floa
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.nearbyint.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.nearbyint.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -879,9 +857,7 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.nearbyint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.nearbyint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -925,9 +901,7 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -971,9 +945,7 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1017,9 +989,7 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1063,9 +1033,7 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1189,8 +1157,6 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64_unmasked(<vscale x 16 x dou
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index 04761d4e7bfc4a..f934127f978dc9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -78,9 +78,7 @@ define <vscale x 1 x half> @vp_rint_nxv1f16_unmasked(<vscale x 1 x half> %va, i3
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -154,9 +152,7 @@ define <vscale x 2 x half> @vp_rint_nxv2f16_unmasked(<vscale x 2 x half> %va, i3
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -232,9 +228,7 @@ define <vscale x 4 x half> @vp_rint_nxv4f16_unmasked(<vscale x 4 x half> %va, i3
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -312,9 +306,7 @@ define <vscale x 8 x half> @vp_rint_nxv8f16_unmasked(<vscale x 8 x half> %va, i3
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -392,9 +384,7 @@ define <vscale x 16 x half> @vp_rint_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -507,8 +497,6 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -516,6 +504,8 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -558,9 +548,7 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -598,9 +586,7 @@ define <vscale x 1 x float> @vp_rint_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.rint.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.rint.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -638,9 +624,7 @@ define <vscale x 2 x float> @vp_rint_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.rint.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.rint.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -680,9 +664,7 @@ define <vscale x 4 x float> @vp_rint_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.rint.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.rint.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -722,9 +704,7 @@ define <vscale x 8 x float> @vp_rint_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.rint.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.rint.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -764,9 +744,7 @@ define <vscale x 16 x float> @vp_rint_nxv16f32_unmasked(<vscale x 16 x float> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.rint.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.rint.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -804,9 +782,7 @@ define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.rint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.rint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -846,9 +822,7 @@ define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -888,9 +862,7 @@ define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -930,9 +902,7 @@ define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -972,9 +942,7 @@ define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1079,8 +1047,6 @@ define <vscale x 16 x double> @vp_rint_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index 16bd665dd0de98..eb4994914fad9a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -86,9 +86,7 @@ define <vscale x 1 x half> @vp_round_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <vscale x 2 x half> @vp_round_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <vscale x 4 x half> @vp_round_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <vscale x 8 x half> @vp_round_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -432,9 +424,7 @@ define <vscale x 16 x half> @vp_round_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -555,8 +545,6 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -564,6 +552,8 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -610,9 +600,7 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -654,9 +642,7 @@ define <vscale x 1 x float> @vp_round_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.round.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.round.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -698,9 +684,7 @@ define <vscale x 2 x float> @vp_round_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.round.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.round.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -744,9 +728,7 @@ define <vscale x 4 x float> @vp_round_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.round.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.round.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -790,9 +772,7 @@ define <vscale x 8 x float> @vp_round_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.round.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.round.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -836,9 +816,7 @@ define <vscale x 16 x float> @vp_round_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.round.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.round.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -880,9 +858,7 @@ define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.round.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.round.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -926,9 +902,7 @@ define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -972,9 +946,7 @@ define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1018,9 +990,7 @@ define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1064,9 +1034,7 @@ define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1179,8 +1147,6 @@ define <vscale x 16 x double> @vp_round_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 429ddb6c71be34..f366a2922d079e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -86,9 +86,7 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16_unmasked(<vscale x 1 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16_unmasked(<vscale x 2 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16_unmasked(<vscale x 4 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16_unmasked(<vscale x 8 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -432,9 +424,7 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16_unmasked(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -555,8 +545,6 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -564,6 +552,8 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -610,9 +600,7 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -654,9 +642,7 @@ define <vscale x 1 x float> @vp_roundeven_nxv1f32_unmasked(<vscale x 1 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.roundeven.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.roundeven.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -698,9 +684,7 @@ define <vscale x 2 x float> @vp_roundeven_nxv2f32_unmasked(<vscale x 2 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.roundeven.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.roundeven.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -744,9 +728,7 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32_unmasked(<vscale x 4 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -790,9 +772,7 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32_unmasked(<vscale x 8 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -836,9 +816,7 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32_unmasked(<vscale x 16 x floa
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -880,9 +858,7 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -926,9 +902,7 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -972,9 +946,7 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1018,9 +990,7 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1064,9 +1034,7 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1179,8 +1147,6 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64_unmasked(<vscale x 16 x dou
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index c854e0fb8a05d9..79c940bdf089a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -86,9 +86,7 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16_unmasked(<vscale x 1 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -170,9 +168,7 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16_unmasked(<vscale x 2 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -256,9 +252,7 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16_unmasked(<vscale x 4 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -344,9 +338,7 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16_unmasked(<vscale x 8 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -432,9 +424,7 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16_unmasked(<vscale x 16 x hal
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -555,8 +545,6 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -564,6 +552,8 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -610,9 +600,7 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -654,9 +642,7 @@ define <vscale x 1 x float> @vp_roundtozero_nxv1f32_unmasked(<vscale x 1 x float
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.roundtozero.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.roundtozero.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -698,9 +684,7 @@ define <vscale x 2 x float> @vp_roundtozero_nxv2f32_unmasked(<vscale x 2 x float
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.roundtozero.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.roundtozero.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -744,9 +728,7 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32_unmasked(<vscale x 4 x float
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.roundtozero.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.roundtozero.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -790,9 +772,7 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32_unmasked(<vscale x 8 x float
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.roundtozero.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.roundtozero.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -836,9 +816,7 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32_unmasked(<vscale x 16 x fl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.roundtozero.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.roundtozero.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -880,9 +858,7 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.roundtozero.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.roundtozero.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -926,9 +902,7 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -972,9 +946,7 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1018,9 +990,7 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1064,9 +1034,7 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1179,8 +1147,6 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64_unmasked(<vscale x 16 x d
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
index 5f381a307099dd..8cefbac59ce674 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
@@ -11,9 +11,7 @@ define <vscale x 2 x i32> @vpmerge_vadd(<vscale x 2 x i32> %passthru, <vscale x
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl, i64 1)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
@@ -25,9 +23,7 @@ define <vscale x 2 x i32> @vpmerge_vsub(<vscale x 2 x i32> %passthru, <vscale x
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 %vl, i64 1)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
@@ -39,9 +35,7 @@ define <vscale x 2 x float> @vpmerge_vfadd(<vscale x 2 x float> %passthru, <vsca
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 7, i64 %vl, i64 1)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
 
   ret <vscale x 2 x float> %b
 }
@@ -54,9 +48,7 @@ define <vscale x 2 x float> @vpmerge_vfsub(<vscale x 2 x float> %passthru, <vsca
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 7, i64 %vl, i64 1)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x float> %b
 }
 declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64, i64, i64)
@@ -68,9 +60,7 @@ define <vscale x 2 x i32> @vpmerge_vwadd(<vscale x 2 x i32> %passthru, <vscale x
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(<vscale x 2 x i32> %passthru, <vscale x 2 x i16> %x, <vscale x 2 x i16> %y, <vscale x 2 x i1> %m, i64 %vl, i64 1)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
@@ -86,9 +76,7 @@ define <vscale x 2 x i32> @vpmerge_vle(<vscale x 2 x i32> %passthru, ptr %p, <vs
     ptr %p,
     <vscale x 2 x i1> %m,
     i64 %vl, i64 1)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64, i64)
@@ -101,9 +89,7 @@ define <vscale x 2 x i32> @vpmerge_vslideup(<vscale x 2 x i32> %passthru, <vscal
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 
@@ -115,9 +101,7 @@ define <vscale x 2 x i32> @vpmerge_vslidedown(<vscale x 2 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i64 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 
@@ -129,9 +113,7 @@ define <vscale x 2 x i32> @vpmerge_vslide1up(<vscale x 2 x i32> %passthru, <vsca
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 
@@ -143,9 +125,7 @@ define <vscale x 2 x i32> @vpmerge_vslide1down(<vscale x 2 x i32> %passthru, <vs
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, i32 %x, <vscale x 2 x i1> %m, i64 %vl, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 
@@ -159,9 +139,7 @@ define <vscale x 2 x i32> @vmerge_smaller_vl_same_passthru(<vscale x 2 x i32> %p
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 3, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 2)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 2)
   ret <vscale x 2 x i32> %b
 }
 
@@ -173,9 +151,7 @@ define <vscale x 2 x i32> @vmerge_larger_vl_same_passthru(<vscale x 2 x i32> %pa
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 2, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 3)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 3)
   ret <vscale x 2 x i32> %b
 }
 
@@ -190,9 +166,7 @@ define <vscale x 2 x i32> @vmerge_smaller_vl_different_passthru(<vscale x 2 x i3
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 3, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 2)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 2)
   ret <vscale x 2 x i32> %b
 }
 
@@ -207,9 +181,7 @@ define <vscale x 2 x i32> @vmerge_larger_vl_different_passthru(<vscale x 2 x i32
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt1, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 2, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 3)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %pt2, <vscale x 2 x i32> %pt2, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 3)
   ret <vscale x 2 x i32> %b
 }
 
@@ -221,9 +193,7 @@ define <vscale x 2 x i32> @vmerge_smaller_vl_poison_passthru(<vscale x 2 x i32>
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 3, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 2)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 2)
   ret <vscale x 2 x i32> %b
 }
 
@@ -235,9 +205,7 @@ define <vscale x 2 x i32> @vmerge_larger_vl_poison_passthru(<vscale x 2 x i32> %
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %m, i64 2, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 3)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 3)
   ret <vscale x 2 x i32> %b
 }
 
@@ -269,8 +237,6 @@ define <vscale x 2 x i32> @vpmerge_viota(<vscale x 2 x i32> %passthru, <vscale x
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, <vscale x 2 x i1> %m, i64 %1, i64 0)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %1)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %1)
   ret <vscale x 2 x i32> %b
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
index 52bd15742ef4bb..31fd5bdbd31fd7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
@@ -19,9 +19,7 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
   ; CHECK-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 8)
   ; CHECK-NEXT:   VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
   ; CHECK-NEXT:   PseudoRET
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   store <vscale x 2 x i32> %b, ptr %p
   ret void
@@ -40,9 +38,7 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
   ; CHECK-NEXT:   [[PseudoVLE32_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 5 /* e32 */, 1 /* ta, mu */ :: (load unknown-size from %ir.p, align 8)
   ; CHECK-NEXT:   VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 1 x s64>) into %ir.p)
   ; CHECK-NEXT:   PseudoRET
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   store <vscale x 2 x i32> %b, ptr %p
   ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 7cc4a9da3d4294..970581b4d80a9a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -14,9 +14,7 @@ define <vscale x 2 x i32> @vpmerge_vpadd(<vscale x 2 x i32> %passthru, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -31,10 +29,8 @@ define <vscale x 2 x i32> @vpmerge_vpadd2(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
-  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
+  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -46,10 +42,8 @@ define <vscale x 2 x i32> @vpmerge_vpadd3(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
-  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
+  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
@@ -61,9 +55,7 @@ define <vscale x 2 x float> @vpmerge_vpfadd(<vscale x 2 x float> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
   ret <vscale x 2 x float> %b
 }
@@ -90,9 +82,7 @@ define <vscale x 2 x i16> @vpmerge_vpfptosi(<vscale x 2 x i16> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
   ret <vscale x 2 x i16> %b
 }
@@ -105,9 +95,7 @@ define <vscale x 2 x float> @vpmerge_vpsitofp(<vscale x 2 x float> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
   ret <vscale x 2 x float> %b
 }
@@ -120,9 +108,7 @@ define <vscale x 2 x i32> @vpmerge_vpzext(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -135,9 +121,7 @@ define <vscale x 2 x i32> @vpmerge_vptrunc(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -150,9 +134,7 @@ define <vscale x 2 x double> @vpmerge_vpfpext(<vscale x 2 x double> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
   ret <vscale x 2 x double> %b
 }
@@ -165,9 +147,7 @@ define <vscale x 2 x float> @vpmerge_vpfptrunc(<vscale x 2 x float> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
   ret <vscale x 2 x float> %b
 }
@@ -180,9 +160,7 @@ define <vscale x 2 x i32> @vpmerge_vpload(<vscale x 2 x i32> %passthru, ptr %p,
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -196,10 +174,8 @@ define <vscale x 2 x i32> @vpmerge_vpload2(<vscale x 2 x i32> %passthru, ptr %p,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
-  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
+  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -212,9 +188,7 @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   store <vscale x 2 x i32> %b, ptr %p
   ret void
@@ -304,9 +278,7 @@ define <vscale x 2 x i32> @vpmerge_viota2(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    ret
   %1 = zext i32 %vl to i64
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> %vm, i64 %1)
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %true = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %true, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+  %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
@@ -494,9 +466,7 @@ define <vscale x 2 x i32> @vpselect_vpadd(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -509,10 +479,8 @@ define <vscale x 2 x i32> @vpselect_vpadd2(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vmseq.vv v0, v9, v10
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
-  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
+  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -524,10 +492,8 @@ define <vscale x 2 x i32> @vpselect_vpadd3(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> %mask, i32 %vl)
-  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
+  %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
 
@@ -538,9 +504,7 @@ define <vscale x 2 x float> @vpselect_vpfadd(<vscale x 2 x float> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
   ret <vscale x 2 x float> %b
 }
@@ -565,9 +529,7 @@ define <vscale x 2 x i16> @vpselect_vpfptosi(<vscale x 2 x i16> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %a, <vscale x 2 x i16> %passthru, i32 %vl)
   ret <vscale x 2 x i16> %b
 }
@@ -579,9 +541,7 @@ define <vscale x 2 x float> @vpselect_vpsitofp(<vscale x 2 x float> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
   ret <vscale x 2 x float> %b
 }
@@ -593,9 +553,7 @@ define <vscale x 2 x i32> @vpselect_vpzext(<vscale x 2 x i32> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -607,9 +565,7 @@ define <vscale x 2 x i32> @vpselect_vptrunc(<vscale x 2 x i32> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 0, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -621,9 +577,7 @@ define <vscale x 2 x double> @vpselect_vpfpext(<vscale x 2 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %a, <vscale x 2 x double> %passthru, i32 %vl)
   ret <vscale x 2 x double> %b
 }
@@ -635,9 +589,7 @@ define <vscale x 2 x float> @vpselect_vpfptrunc(<vscale x 2 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %a, <vscale x 2 x float> %passthru, i32 %vl)
   ret <vscale x 2 x float> %b
 }
@@ -649,9 +601,7 @@ define <vscale x 2 x i32> @vpselect_vpload(<vscale x 2 x i32> %passthru, ptr %p,
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -664,10 +614,8 @@ define <vscale x 2 x i32> @vpselect_vpload2(<vscale x 2 x i32> %passthru, ptr %p
 ; CHECK-NEXT:    vmseq.vv v0, v9, v10
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
-  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
+  %m = call <vscale x 2 x i1> @llvm.vp.icmp.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, metadata !"eq", <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   ret <vscale x 2 x i32> %b
 }
@@ -680,9 +628,7 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> %mask, i32 %vl)
+  %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
   store <vscale x 2 x i32> %b, ptr %p
   ret void
@@ -1052,15 +998,12 @@ define <vscale x 2 x i32> @vredsum_allones_mask(<vscale x 2 x i32> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %passthru,
     <vscale x 2 x i32> %x,
     <vscale x 2 x i32> %y,
     i64 %vl)
-  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x i32> %b
 }
 
@@ -1072,15 +1015,12 @@ define <vscale x 2 x float> @vfredusum_allones_mask(<vscale x 2 x float> %passth
 ; CHECK-NEXT:    vfredusum.vs v8, v9, v10
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-
   %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %passthru,
     <vscale x 2 x float> %x,
     <vscale x 2 x float> %y,
     i64 0, i64 %vl)
-  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %mask, i64 %vl)
+  %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i64 %vl)
   ret <vscale x 2 x float> %b
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll
index bd431186f05623..3aeb4e864627cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vmv.ll
@@ -7,9 +7,7 @@ define <vscale x 1 x i8> @vpmerge_mf8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i8 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %1 = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %x, i32 %vl)
+  %1 = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %x, i32 %vl)
   ret <vscale x 1 x i8> %1
 }
 
@@ -19,9 +17,7 @@ define <vscale x 2 x i8> @vpmerge_mf4(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i8 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %1 = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %x, i32 %vl)
+  %1 = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %x, i32 %vl)
   ret <vscale x 2 x i8> %1
 }
 
@@ -31,9 +27,7 @@ define <vscale x 4 x i8> @vpmerge_mf2(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i8 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %1 = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %x, i32 %vl)
+  %1 = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %x, i32 %vl)
   ret <vscale x 4 x i8> %1
 }
 
@@ -43,9 +37,7 @@ define <vscale x 8 x i8> @vpmerge_m1(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i8 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %1 = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %x, i32 %vl)
+  %1 = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %x, i32 %vl)
   ret <vscale x 8 x i8> %1
 }
 
@@ -55,9 +47,7 @@ define <vscale x 8 x i16> @vpmerge_m2(<vscale x 8 x i16> %x, <vscale x 8 x i16>
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i16 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %1 = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %x, i32 %vl)
+  %1 = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %x, i32 %vl)
   ret <vscale x 8 x i16> %1
 }
 
@@ -67,9 +57,7 @@ define <vscale x 8 x i32> @vpmerge_m4(<vscale x 8 x i32> %x, <vscale x 8 x i32>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %1 = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %x, i32 %vl)
+  %1 = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %x, i32 %vl)
   ret <vscale x 8 x i32> %1
 }
 
@@ -79,9 +67,7 @@ define <vscale x 8 x i64> @vpmerge_m8(<vscale x 8 x i64> %x, <vscale x 8 x i64>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i64 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %1 = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %x, i32 %vl)
+  %1 = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %x, i32 %vl)
   ret <vscale x 8 x i64> %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index b1f1a4dceccfb9..7fd77c050b2957 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -49,9 +49,7 @@ define <vscale x 1 x i1> @icmp_eq_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -61,9 +59,7 @@ define <vscale x 1 x i1> @icmp_eq_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -107,9 +103,7 @@ define <vscale x 1 x i1> @icmp_ne_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -119,9 +113,7 @@ define <vscale x 1 x i1> @icmp_ne_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -165,9 +157,7 @@ define <vscale x 1 x i1> @icmp_ugt_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -177,9 +167,7 @@ define <vscale x 1 x i1> @icmp_ugt_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -225,9 +213,7 @@ define <vscale x 1 x i1> @icmp_uge_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -237,9 +223,7 @@ define <vscale x 1 x i1> @icmp_uge_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -283,9 +267,7 @@ define <vscale x 1 x i1> @icmp_ult_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -295,9 +277,7 @@ define <vscale x 1 x i1> @icmp_ult_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -341,9 +321,7 @@ define <vscale x 1 x i1> @icmp_sgt_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -353,9 +331,7 @@ define <vscale x 1 x i1> @icmp_sgt_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -401,9 +377,7 @@ define <vscale x 1 x i1> @icmp_sge_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -413,9 +387,7 @@ define <vscale x 1 x i1> @icmp_sge_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -459,9 +431,7 @@ define <vscale x 1 x i1> @icmp_slt_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -471,9 +441,7 @@ define <vscale x 1 x i1> @icmp_slt_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -519,9 +487,7 @@ define <vscale x 1 x i1> @icmp_sle_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -531,9 +497,7 @@ define <vscale x 1 x i1> @icmp_sle_vi_swap_nxv1i8(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i8(<vscale x 1 x i8> splat (i8 4), <vscale x 1 x i8> %va, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -665,9 +629,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -677,9 +639,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -723,9 +683,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -735,9 +693,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -781,9 +737,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -793,9 +747,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -841,9 +793,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -853,9 +803,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -899,9 +847,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -911,9 +857,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -957,9 +901,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -969,9 +911,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1017,9 +957,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1029,9 +967,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1075,9 +1011,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1087,9 +1021,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1135,9 +1067,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1147,9 +1077,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_swap_nxv8i8(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i8(<vscale x 8 x i8> splat (i8 4), <vscale x 8 x i8> %va, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1312,9 +1240,7 @@ define <vscale x 1 x i1> @icmp_eq_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1324,9 +1250,7 @@ define <vscale x 1 x i1> @icmp_eq_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1370,9 +1294,7 @@ define <vscale x 1 x i1> @icmp_ne_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1382,9 +1304,7 @@ define <vscale x 1 x i1> @icmp_ne_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1428,9 +1348,7 @@ define <vscale x 1 x i1> @icmp_ugt_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1440,9 +1358,7 @@ define <vscale x 1 x i1> @icmp_ugt_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1488,9 +1404,7 @@ define <vscale x 1 x i1> @icmp_uge_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1500,9 +1414,7 @@ define <vscale x 1 x i1> @icmp_uge_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1546,9 +1458,7 @@ define <vscale x 1 x i1> @icmp_ult_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1558,9 +1468,7 @@ define <vscale x 1 x i1> @icmp_ult_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1604,9 +1512,7 @@ define <vscale x 1 x i1> @icmp_sgt_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1616,9 +1522,7 @@ define <vscale x 1 x i1> @icmp_sgt_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1664,9 +1568,7 @@ define <vscale x 1 x i1> @icmp_sge_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1676,9 +1578,7 @@ define <vscale x 1 x i1> @icmp_sge_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1722,9 +1622,7 @@ define <vscale x 1 x i1> @icmp_slt_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1734,9 +1632,7 @@ define <vscale x 1 x i1> @icmp_slt_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1782,9 +1678,7 @@ define <vscale x 1 x i1> @icmp_sle_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1794,9 +1688,7 @@ define <vscale x 1 x i1> @icmp_sle_vi_swap_nxv1i32(<vscale x 1 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i32(<vscale x 1 x i32> splat (i32 4), <vscale x 1 x i32> %va, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -1846,9 +1738,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vmseq.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1859,9 +1749,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vscal
 ; CHECK-NEXT:    vmseq.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1909,9 +1797,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vmsne.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1922,9 +1808,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vscal
 ; CHECK-NEXT:    vmsne.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1972,9 +1856,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmsgtu.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -1985,9 +1867,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vsca
 ; CHECK-NEXT:    vmsleu.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2037,9 +1917,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmsgtu.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2050,9 +1928,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vsca
 ; CHECK-NEXT:    vmsleu.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2100,9 +1976,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmsleu.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2113,9 +1987,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vsca
 ; CHECK-NEXT:    vmsgtu.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2163,9 +2035,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmsgt.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2176,9 +2046,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vsca
 ; CHECK-NEXT:    vmsle.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2228,9 +2096,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmsgt.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2241,9 +2107,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vsca
 ; CHECK-NEXT:    vmsle.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2291,9 +2155,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmsle.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2304,9 +2166,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vsca
 ; CHECK-NEXT:    vmsgt.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2356,9 +2216,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmsle.vi v12, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2369,9 +2227,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_swap_nxv8i32(<vscale x 8 x i32> %va, <vsca
 ; CHECK-NEXT:    vmsgt.vi v12, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i32(<vscale x 8 x i32> splat (i32 4), <vscale x 8 x i32> %va, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -2572,9 +2428,7 @@ define <vscale x 1 x i1> @icmp_eq_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2584,9 +2438,7 @@ define <vscale x 1 x i1> @icmp_eq_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"eq", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2658,9 +2510,7 @@ define <vscale x 1 x i1> @icmp_ne_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2670,9 +2520,7 @@ define <vscale x 1 x i1> @icmp_ne_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"ne", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2744,9 +2592,7 @@ define <vscale x 1 x i1> @icmp_ugt_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2756,9 +2602,7 @@ define <vscale x 1 x i1> @icmp_ugt_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2832,9 +2676,7 @@ define <vscale x 1 x i1> @icmp_uge_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2844,9 +2686,7 @@ define <vscale x 1 x i1> @icmp_uge_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2918,9 +2758,7 @@ define <vscale x 1 x i1> @icmp_ult_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -2930,9 +2768,7 @@ define <vscale x 1 x i1> @icmp_ult_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3004,9 +2840,7 @@ define <vscale x 1 x i1> @icmp_sgt_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3016,9 +2850,7 @@ define <vscale x 1 x i1> @icmp_sgt_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"sgt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3092,9 +2924,7 @@ define <vscale x 1 x i1> @icmp_sge_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3104,9 +2934,7 @@ define <vscale x 1 x i1> @icmp_sge_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"sge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3178,9 +3006,7 @@ define <vscale x 1 x i1> @icmp_slt_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3190,9 +3016,7 @@ define <vscale x 1 x i1> @icmp_slt_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"slt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3266,9 +3090,7 @@ define <vscale x 1 x i1> @icmp_sle_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3278,9 +3100,7 @@ define <vscale x 1 x i1> @icmp_sle_vi_swap_nxv1i64(<vscale x 1 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i1> @llvm.vp.icmp.nxv1i64(<vscale x 1 x i64> splat (i64 4), <vscale x 1 x i64> %va, metadata !"sle", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
 }
 
@@ -3360,9 +3180,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vmseq.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3373,9 +3191,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vscal
 ; CHECK-NEXT:    vmseq.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3453,9 +3269,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vmsne.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3466,9 +3280,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vscal
 ; CHECK-NEXT:    vmsne.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"ne", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3546,9 +3358,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vmsgtu.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3559,9 +3369,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vsca
 ; CHECK-NEXT:    vmsleu.vi v16, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3641,9 +3449,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vmsgtu.vi v16, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3654,9 +3460,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vsca
 ; CHECK-NEXT:    vmsleu.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3734,9 +3538,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vmsleu.vi v16, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3747,9 +3549,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vsca
 ; CHECK-NEXT:    vmsgtu.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3827,9 +3627,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vmsgt.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3840,9 +3638,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vsca
 ; CHECK-NEXT:    vmsle.vi v16, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"sgt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3922,9 +3718,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vmsgt.vi v16, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -3935,9 +3729,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vsca
 ; CHECK-NEXT:    vmsle.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"sge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -4015,9 +3807,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vmsle.vi v16, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -4028,9 +3818,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vsca
 ; CHECK-NEXT:    vmsgt.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"slt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -4110,9 +3898,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vmsle.vi v16, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
 
@@ -4123,8 +3909,6 @@ define <vscale x 8 x i1> @icmp_sle_vi_swap_nxv8i64(<vscale x 8 x i64> %va, <vsca
 ; CHECK-NEXT:    vmsgt.vi v16, v8, 3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i64(<vscale x 8 x i64> splat (i64 4), <vscale x 8 x i64> %va, metadata !"sle", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
index 5f35a4e50a9523..90ffeff9689e07 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
@@ -78,9 +78,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 0, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  %vc = icmp eq <vscale x 8 x i8> %va, splat (i8 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -90,9 +88,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  %vc = icmp eq <vscale x 8 x i8> %va, splat (i8 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -102,9 +98,7 @@ define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i8> %splat, %va
+  %vc = icmp eq <vscale x 8 x i8> splat (i8 5), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -148,9 +142,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  %vc = icmp ne <vscale x 8 x i8> %va, splat (i8 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -194,9 +186,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  %vc = icmp ugt <vscale x 8 x i8> %va, splat (i8 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -242,9 +232,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vmv.v.i v9, -16
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -254,9 +242,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -266,9 +252,7 @@ define <vscale x 8 x i1> @icmp_uge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i8> %splat, %va
+  %vc = icmp uge <vscale x 8 x i8> splat (i8 15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -278,9 +262,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 0, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -290,9 +272,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 1)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -302,9 +282,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -314,9 +292,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -375,9 +351,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -387,9 +361,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -399,9 +371,7 @@ define <vscale x 8 x i1> @icmp_ult_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i8> %splat, %va
+  %vc = icmp ult <vscale x 8 x i8> splat (i8 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -411,9 +381,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 0, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -423,9 +391,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 1)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -435,9 +401,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -496,9 +460,7 @@ define <vscale x 8 x i1> @icmp_ule_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ule <vscale x 8 x i8> %va, %splat
+  %vc = icmp ule <vscale x 8 x i8> %va, splat (i8 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -542,9 +504,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+  %vc = icmp sgt <vscale x 8 x i8> %va, splat (i8 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -590,9 +550,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vmv.v.i v9, -16
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -602,9 +560,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -614,9 +570,7 @@ define <vscale x 8 x i1> @icmp_sge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i8> %splat, %va
+  %vc = icmp sge <vscale x 8 x i8> splat (i8 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -626,9 +580,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 0, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -638,9 +590,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -685,9 +635,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -697,9 +645,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -709,9 +655,7 @@ define <vscale x 8 x i1> @icmp_slt_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i8> %splat, %va
+  %vc = icmp slt <vscale x 8 x i8> splat (i8 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -721,9 +665,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 0, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -733,9 +675,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -780,9 +720,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sle <vscale x 8 x i8> %va, %splat
+  %vc = icmp sle <vscale x 8 x i8> %va, splat (i8 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -826,9 +764,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i16> %va, %splat
+  %vc = icmp eq <vscale x 8 x i16> %va, splat (i16 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -838,9 +774,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i16> %va, %splat
+  %vc = icmp eq <vscale x 8 x i16> %va, splat (i16 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -850,9 +784,7 @@ define <vscale x 8 x i1> @icmp_eq_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i16> %splat, %va
+  %vc = icmp eq <vscale x 8 x i16> splat (i16 5), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -896,9 +828,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ne <vscale x 8 x i16> %va, %splat
+  %vc = icmp ne <vscale x 8 x i16> %va, splat (i16 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -942,9 +872,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+  %vc = icmp ugt <vscale x 8 x i16> %va, splat (i16 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -990,9 +918,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vmv.v.i v10, -16
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1002,9 +928,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1014,9 +938,7 @@ define <vscale x 8 x i1> @icmp_uge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i16> %splat, %va
+  %vc = icmp uge <vscale x 8 x i16> splat (i16 15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1026,9 +948,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1038,9 +958,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 1)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1050,9 +968,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1062,9 +978,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1109,9 +1023,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1121,9 +1033,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1133,9 +1043,7 @@ define <vscale x 8 x i1> @icmp_ult_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i16> %splat, %va
+  %vc = icmp ult <vscale x 8 x i16> splat (i16 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1145,9 +1053,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1157,9 +1063,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 1)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1169,9 +1073,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1216,9 +1118,7 @@ define <vscale x 8 x i1> @icmp_ule_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ule <vscale x 8 x i16> %va, %splat
+  %vc = icmp ule <vscale x 8 x i16> %va, splat (i16 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1262,9 +1162,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+  %vc = icmp sgt <vscale x 8 x i16> %va, splat (i16 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1310,9 +1208,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vmv.v.i v10, -16
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1322,9 +1218,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1334,9 +1228,7 @@ define <vscale x 8 x i1> @icmp_sge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i16> %splat, %va
+  %vc = icmp sge <vscale x 8 x i16> splat (i16 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1346,9 +1238,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1358,9 +1248,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1405,9 +1293,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1417,9 +1303,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1429,9 +1313,7 @@ define <vscale x 8 x i1> @icmp_slt_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i16> %splat, %va
+  %vc = icmp slt <vscale x 8 x i16> splat (i16 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1441,9 +1323,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 0, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1453,9 +1333,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1500,9 +1378,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sle <vscale x 8 x i16> %va, %splat
+  %vc = icmp sle <vscale x 8 x i16> %va, splat (i16 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1546,9 +1422,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 0, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i32> %va, %splat
+  %vc = icmp eq <vscale x 8 x i32> %va, splat (i32 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1558,9 +1432,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i32> %va, %splat
+  %vc = icmp eq <vscale x 8 x i32> %va, splat (i32 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1570,9 +1442,7 @@ define <vscale x 8 x i1> @icmp_eq_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i32> %splat, %va
+  %vc = icmp eq <vscale x 8 x i32> splat (i32 5), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1616,9 +1486,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ne <vscale x 8 x i32> %va, %splat
+  %vc = icmp ne <vscale x 8 x i32> %va, splat (i32 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1662,9 +1530,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+  %vc = icmp ugt <vscale x 8 x i32> %va, splat (i32 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1710,9 +1576,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vmv.v.i v12, -16
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1722,9 +1586,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1734,9 +1596,7 @@ define <vscale x 8 x i1> @icmp_uge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i32> %splat, %va
+  %vc = icmp uge <vscale x 8 x i32> splat (i32 15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1746,9 +1606,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 0, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1758,9 +1616,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 1)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1770,9 +1626,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1782,9 +1636,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1829,9 +1681,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1841,9 +1691,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1853,9 +1701,7 @@ define <vscale x 8 x i1> @icmp_ult_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i32> %splat, %va
+  %vc = icmp ult <vscale x 8 x i32> splat (i32 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1865,9 +1711,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 0, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1877,9 +1721,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 1)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1889,9 +1731,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1936,9 +1776,7 @@ define <vscale x 8 x i1> @icmp_ule_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ule <vscale x 8 x i32> %va, %splat
+  %vc = icmp ule <vscale x 8 x i32> %va, splat (i32 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -1982,9 +1820,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+  %vc = icmp sgt <vscale x 8 x i32> %va, splat (i32 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2030,9 +1866,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vmv.v.i v12, -16
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i32> %va, %splat
+  %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2042,9 +1876,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i32> %va, %splat
+  %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2054,9 +1886,7 @@ define <vscale x 8 x i1> @icmp_sge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i32> %splat, %va
+  %vc = icmp sge <vscale x 8 x i32> splat (i32 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2066,9 +1896,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 0, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i32> %va, %splat
+  %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2078,9 +1906,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i32> %va, %splat
+  %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2125,9 +1951,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i32> %va, %splat
+  %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2137,9 +1961,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i32> %va, %splat
+  %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2149,9 +1971,7 @@ define <vscale x 8 x i1> @icmp_slt_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i32> %splat, %va
+  %vc = icmp slt <vscale x 8 x i32> splat (i32 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2161,9 +1981,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 0, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i32> %va, %splat
+  %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2173,9 +1991,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i32> %va, %splat
+  %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2220,9 +2036,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sle <vscale x 8 x i32> %va, %splat
+  %vc = icmp sle <vscale x 8 x i32> %va, splat (i32 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2292,9 +2106,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i64> %va, %splat
+  %vc = icmp eq <vscale x 8 x i64> %va, splat (i64 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2304,9 +2116,7 @@ define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i64> %va, %splat
+  %vc = icmp eq <vscale x 8 x i64> %va, splat (i64 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2316,9 +2126,7 @@ define <vscale x 8 x i1> @icmp_eq_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i64> %splat, %va
+  %vc = icmp eq <vscale x 8 x i64> splat (i64 5), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2388,9 +2196,7 @@ define <vscale x 8 x i1> @icmp_ne_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ne <vscale x 8 x i64> %va, %splat
+  %vc = icmp ne <vscale x 8 x i64> %va, splat (i64 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2460,9 +2266,7 @@ define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+  %vc = icmp ugt <vscale x 8 x i64> %va, splat (i64 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2534,9 +2338,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vmv.v.i v16, -16
 ; CHECK-NEXT:    vmsleu.vv v0, v16, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2546,9 +2348,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2558,9 +2358,7 @@ define <vscale x 8 x i1> @icmp_uge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i64> %splat, %va
+  %vc = icmp uge <vscale x 8 x i64> splat (i64 15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2570,9 +2368,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2582,9 +2378,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 1)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2594,9 +2388,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2606,9 +2398,7 @@ define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2679,9 +2469,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2691,9 +2479,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2703,9 +2489,7 @@ define <vscale x 8 x i1> @icmp_ult_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i64> %splat, %va
+  %vc = icmp ult <vscale x 8 x i64> splat (i64 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2715,9 +2499,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2727,9 +2509,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 1)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2739,9 +2519,7 @@ define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2812,9 +2590,7 @@ define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp ule <vscale x 8 x i64> %va, %splat
+  %vc = icmp ule <vscale x 8 x i64> %va, splat (i64 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2884,9 +2660,7 @@ define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+  %vc = icmp sgt <vscale x 8 x i64> %va, splat (i64 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2958,9 +2732,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vmv.v.i v16, -16
 ; CHECK-NEXT:    vmsle.vv v0, v16, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2970,9 +2742,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2982,9 +2752,7 @@ define <vscale x 8 x i1> @icmp_sge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i64> %splat, %va
+  %vc = icmp sge <vscale x 8 x i64> splat (i64 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -2994,9 +2762,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3006,9 +2772,7 @@ define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3079,9 +2843,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 -16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3091,9 +2853,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 -15)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3103,9 +2863,7 @@ define <vscale x 8 x i1> @icmp_slt_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i64> %splat, %va
+  %vc = icmp slt <vscale x 8 x i64> splat (i64 -15), %va
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3115,9 +2873,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 0)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3127,9 +2883,7 @@ define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3200,9 +2954,7 @@ define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmsle.vi v0, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  %vc = icmp sle <vscale x 8 x i64> %va, splat (i64 5)
   ret <vscale x 8 x i1> %vc
 }
 
@@ -3216,11 +2968,7 @@ define <vscale x 8 x i1> @icmp_eq_ii_nxv8i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %heada = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %splata = shufflevector <vscale x 8 x i8> %heada, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %headb = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %splatb = shufflevector <vscale x 8 x i8> %headb, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = icmp eq <vscale x 8 x i8> %splata, %splatb
+  %vc = icmp eq <vscale x 8 x i8> splat (i8 5), splat (i8 2)
   ret <vscale x 8 x i1> %vc
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 5d09c39dfd6e68..9046c861c3367a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -1214,8 +1214,6 @@ vector.ph:                                        ; preds = %entry
   %3 = shl i64 %2, 2
   %n.mod.vf = urem i64 1024, %3
   %n.vec = sub nsw i64 1024, %n.mod.vf
-  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
   %4 = call i64 @llvm.vscale.i64()
   %5 = shl i64 %4, 2
   br label %vector.body
@@ -1224,7 +1222,7 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %6 = getelementptr inbounds i32, ptr %a, i64 %index
   %wide.load = load <vscale x 4 x i32>, ptr %6, align 4
-  %7 = ashr <vscale x 4 x i32> %wide.load, %broadcast.splat
+  %7 = ashr <vscale x 4 x i32> %wide.load, splat (i32 2)
   store <vscale x 4 x i32> %7, ptr %6, align 4
   %index.next = add nuw i64 %index, %5
   %8 = icmp eq i64 %index.next, %n.vec
diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index 8f02ca65358102..eff8c26d4d0616 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -85,10 +85,8 @@ define <vscale x 8 x i8> @mul_stepvector_nxv8i8() {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 8 x i8> poison, i8 3, i32 0
-  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
-  %3 = mul <vscale x 8 x i8> %2, %1
+  %3 = mul <vscale x 8 x i8> %2, splat (i8 3)
   ret <vscale x 8 x i8> %3
 }
 
@@ -100,10 +98,8 @@ define <vscale x 8 x i8> @shl_stepvector_nxv8i8() {
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
-  %3 = shl <vscale x 8 x i8> %2, %1
+  %3 = shl <vscale x 8 x i8> %2, splat (i8 2)
   ret <vscale x 8 x i8> %3
 }
 
@@ -250,10 +246,8 @@ define <vscale x 16 x i16> @mul_stepvector_nxv16i16() {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 16 x i16> poison, i16 3, i32 0
-  %1 = shufflevector <vscale x 16 x i16> %0, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
   %2 = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
-  %3 = mul <vscale x 16 x i16> %2, %1
+  %3 = mul <vscale x 16 x i16> %2, splat (i16 3)
   ret <vscale x 16 x i16> %3
 }
 
@@ -265,10 +259,8 @@ define <vscale x 16 x i16> @shl_stepvector_nxv16i16() {
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 16 x i16> poison, i16 2, i32 0
-  %1 = shufflevector <vscale x 16 x i16> %0, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
   %2 = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
-  %3 = shl <vscale x 16 x i16> %2, %1
+  %3 = shl <vscale x 16 x i16> %2, splat (i16 2)
   ret <vscale x 16 x i16> %3
 }
 
@@ -379,10 +371,8 @@ define <vscale x 16 x i32> @mul_stepvector_nxv16i32() {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 16 x i32> poison, i32 3, i32 0
-  %1 = shufflevector <vscale x 16 x i32> %0, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
   %2 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
-  %3 = mul <vscale x 16 x i32> %2, %1
+  %3 = mul <vscale x 16 x i32> %2, splat (i32 3)
   ret <vscale x 16 x i32> %3
 }
 
@@ -394,10 +384,8 @@ define <vscale x 16 x i32> @shl_stepvector_nxv16i32() {
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 16 x i32> poison, i32 2, i32 0
-  %1 = shufflevector <vscale x 16 x i32> %0, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
   %2 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
-  %3 = shl <vscale x 16 x i32> %2, %1
+  %3 = shl <vscale x 16 x i32> %2, splat (i32 2)
   ret <vscale x 16 x i32> %3
 }
 
@@ -484,10 +472,8 @@ define <vscale x 8 x i64> @mul_stepvector_nxv8i64() {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
-  %1 = shufflevector <vscale x 8 x i64> %0, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-  %3 = mul <vscale x 8 x i64> %2, %1
+  %3 = mul <vscale x 8 x i64> %2, splat (i64 3)
   ret <vscale x 8 x i64> %3
 }
 
@@ -520,10 +506,8 @@ define <vscale x 8 x i64> @mul_bigimm_stepvector_nxv8i64() {
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 8 x i64> poison, i64 33333333333, i32 0
-  %1 = shufflevector <vscale x 8 x i64> %0, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-  %3 = mul <vscale x 8 x i64> %2, %1
+  %3 = mul <vscale x 8 x i64> %2, splat (i64 33333333333)
   ret <vscale x 8 x i64> %3
 }
 
@@ -535,10 +519,8 @@ define <vscale x 8 x i64> @shl_stepvector_nxv8i64() {
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %1 = shufflevector <vscale x 8 x i64> %0, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %2 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-  %3 = shl <vscale x 8 x i64> %2, %1
+  %3 = shl <vscale x 8 x i64> %2, splat (i64 2)
   ret <vscale x 8 x i64> %3
 }
 
@@ -637,10 +619,8 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
 ; RV64-NEXT:    vadd.vx v16, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 16 x i64> poison, i64 3, i32 0
-  %1 = shufflevector <vscale x 16 x i64> %0, <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer
   %2 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
-  %3 = mul <vscale x 16 x i64> %2, %1
+  %3 = mul <vscale x 16 x i64> %2, splat (i64 3)
   ret <vscale x 16 x i64> %3
 }
 
@@ -692,10 +672,8 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
 ; RV64-NEXT:    vadd.vx v16, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 16 x i64> poison, i64 33333333333, i32 0
-  %1 = shufflevector <vscale x 16 x i64> %0, <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer
   %2 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
-  %3 = mul <vscale x 16 x i64> %2, %1
+  %3 = mul <vscale x 16 x i64> %2, splat (i64 33333333333)
   ret <vscale x 16 x i64> %3
 }
 
@@ -727,10 +705,8 @@ define <vscale x 16 x i64> @shl_stepvector_nxv16i64() {
 ; RV64-NEXT:    vadd.vx v16, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %0 = insertelement <vscale x 16 x i64> poison, i64 2, i32 0
-  %1 = shufflevector <vscale x 16 x i64> %0, <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer
   %2 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
-  %3 = shl <vscale x 16 x i64> %2, %1
+  %3 = shl <vscale x 16 x i64> %2, splat (i64 2)
   ret <vscale x 16 x i64> %3
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
index 6b584cfb22a52e..ea428bb4771718 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
@@ -206,9 +206,7 @@ define <vscale x 1 x i64> @straightline_offset_shl(ptr %p) {
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
   %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-  %splat.insert = insertelement <vscale x 1 x i64> poison, i64 3, i64 0
-  %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %offset = shl <vscale x 1 x i64> %step, %splat
+  %offset = shl <vscale x 1 x i64> %step, splat (i64 3)
   %ptrs = getelementptr i32, ptr %p, <vscale x 1 x i64> %offset
   %x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
   <vscale x 1 x ptr> %ptrs,
@@ -222,17 +220,13 @@ define <vscale x 1 x i64> @straightline_offset_shl(ptr %p) {
 define <vscale x 1 x i64> @neg_shl_is_not_commutative(ptr %p) {
 ; CHECK-LABEL: @neg_shl_is_not_commutative(
 ; CHECK-NEXT:    [[STEP:%.*]] = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-; CHECK-NEXT:    [[SPLAT_INSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 3, i64 0
-; CHECK-NEXT:    [[SPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[SPLAT_INSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-; CHECK-NEXT:    [[OFFSET:%.*]] = shl <vscale x 1 x i64> [[SPLAT]], [[STEP]]
+; CHECK-NEXT:    [[OFFSET:%.*]] = shl <vscale x 1 x i64> shufflevector (<vscale x 1 x i64> insertelement (<vscale x 1 x i64> poison, i64 3, i64 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), [[STEP]]
 ; CHECK-NEXT:    [[PTRS:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 1 x i64> [[OFFSET]]
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr> [[PTRS]], i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> poison)
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
   %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-  %splat.insert = insertelement <vscale x 1 x i64> poison, i64 3, i64 0
-  %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %offset = shl <vscale x 1 x i64> %splat, %step
+  %offset = shl <vscale x 1 x i64> splat (i64 3), %step
   %ptrs = getelementptr i32, ptr %p, <vscale x 1 x i64> %offset
   %x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
   <vscale x 1 x ptr> %ptrs,
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index a834630e7ebea5..0e2105d5cba861 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -60,9 +60,7 @@ define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64_allones_mask(ptr %ptr, i64 s
 ; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1
 ; CHECK-RV64-NEXT:    ret
-  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr %ptr, i64 %stride, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr %ptr, i64 %stride, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %load
 }
 
@@ -84,9 +82,7 @@ define <vscale x 1 x i8> @strided_vpload_nxv1i8_allones_mask(ptr %ptr, i32 signe
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %load
 }
 
@@ -142,9 +138,7 @@ define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signe
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
+  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %load
 }
 
@@ -178,9 +172,7 @@ define <vscale x 2 x i16> @strided_vpload_nxv2i16_allones_mask(ptr %ptr, i32 sig
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
+  %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %load
 }
 
@@ -270,9 +262,7 @@ define <vscale x 4 x i32> @strided_vpload_nxv4i32_allones_mask(ptr %ptr, i32 sig
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
+  %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %load
 }
 
@@ -316,9 +306,7 @@ define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 sig
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %load
 }
 
@@ -388,9 +376,7 @@ define <vscale x 2 x half> @strided_vpload_nxv2f16_allones_mask(ptr %ptr, i32 si
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
+  %load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %load
 }
 
@@ -492,9 +478,7 @@ define <vscale x 8 x float> @strided_vpload_nxv8f32_allones_mask(ptr %ptr, i32 s
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
+  %load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %load
 }
 
@@ -550,9 +534,7 @@ define <vscale x 4 x double> @strided_vpload_nxv4f64_allones_mask(ptr %ptr, i32
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
+  %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %load
 }
 
@@ -585,9 +567,7 @@ define <vscale x 3 x double> @strided_vpload_nxv3f64_allones_mask(ptr %ptr, i32
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %one = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <vscale x 3 x i1> %one, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr %ptr, i32 %stride, <vscale x 3 x i1> %allones, i32 %evl)
+  %v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr %ptr, i32 %stride, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x double> %v
 }
 
@@ -686,9 +666,7 @@ define <vscale x 16 x double> @strided_load_nxv16f64_allones_mask(ptr %ptr, i64
 ; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
 ; CHECK-RV64-NEXT:    ret
-  %one = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %one, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index cf6ce89b9b5a46..9378bb3d3ca61f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -460,9 +460,7 @@ define void @strided_vpstore_nxv1i8_allones_mask(<vscale x 1 x i8> %val, ptr %pt
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.nxv1i8.p0.i32(<vscale x 1 x i8> %val, ptr %ptr, i32 %strided, <vscale x 1 x i1> %b, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.nxv1i8.p0.i32(<vscale x 1 x i8> %val, ptr %ptr, i32 %strided, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret void
 }
 
@@ -483,9 +481,7 @@ define void @strided_vpstore_nxv3f32_allones_mask(<vscale x 3 x float> %v, ptr %
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
 ; CHECK-NEXT:    ret
-  %one = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <vscale x 3 x i1> %one, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.nxv3f32.p0.i32(<vscale x 3 x float> %v, ptr %ptr, i32 %stride, <vscale x 3 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.nxv3f32.p0.i32(<vscale x 3 x float> %v, ptr %ptr, i32 %stride, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret void
 }
 
@@ -539,9 +535,7 @@ define void @strided_store_nxv16f64_allones_mask(<vscale x 16 x double> %v, ptr
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v16, (a0), a1
 ; CHECK-NEXT:    ret
-  %one = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %one, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  call void @llvm.experimental.vp.strided.store.nxv16f64.p0.i32(<vscale x 16 x double> %v, ptr %ptr, i32 %stride, <vscale x 16 x i1> %allones, i32 %evl)
+  call void @llvm.experimental.vp.strided.store.nxv16f64.p0.i32(<vscale x 16 x double> %v, ptr %ptr, i32 %stride, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret void
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
index bfbbb4b4067f84..52c2cace185f78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
@@ -34,12 +34,8 @@ define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq0(<vscale x 1 x i16> %x)
 ; RV64-NEXT:    vmv.v.i v8, 0
 ; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
 ; RV64-NEXT:    ret
-  %ins1 = insertelement <vscale x 1 x i16> poison, i16 6, i32 0
-  %splat1 = shufflevector <vscale x 1 x i16> %ins1, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %urem = urem <vscale x 1 x i16> %x, %splat1
-  %ins2 = insertelement <vscale x 1 x i16> poison, i16 0, i32 0
-  %splat2 = shufflevector <vscale x 1 x i16> %ins2, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ne <vscale x 1 x i16> %urem, %splat2
+  %urem = urem <vscale x 1 x i16> %x, splat (i16 6)
+  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 0)
   %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %ext
 }
@@ -70,12 +66,8 @@ define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq0(<vscale x 1 x i16> %x)
 ; RV64-NEXT:    vmv.v.i v8, 0
 ; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
 ; RV64-NEXT:    ret
-  %ins1 = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
-  %splat1 = shufflevector <vscale x 1 x i16> %ins1, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %urem = urem <vscale x 1 x i16> %x, %splat1
-  %ins2 = insertelement <vscale x 1 x i16> poison, i16 0, i32 0
-  %splat2 = shufflevector <vscale x 1 x i16> %ins2, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ne <vscale x 1 x i16> %urem, %splat2
+  %urem = urem <vscale x 1 x i16> %x, splat (i16 5)
+  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 0)
   %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %ext
 }
@@ -116,12 +108,8 @@ define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq1(<vscale x 1 x i16> %x)
 ; RV64-NEXT:    vmv.v.i v8, 0
 ; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
 ; RV64-NEXT:    ret
-  %ins1 = insertelement <vscale x 1 x i16> poison, i16 6, i32 0
-  %splat1 = shufflevector <vscale x 1 x i16> %ins1, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %urem = urem <vscale x 1 x i16> %x, %splat1
-  %ins2 = insertelement <vscale x 1 x i16> poison, i16 1, i32 0
-  %splat2 = shufflevector <vscale x 1 x i16> %ins2, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ne <vscale x 1 x i16> %urem, %splat2
+  %urem = urem <vscale x 1 x i16> %x, splat (i16 6)
+  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 1)
   %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %ext
 }
@@ -156,12 +144,8 @@ define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq1(<vscale x 1 x i16> %x)
 ; RV64-NEXT:    vmv.v.i v8, 0
 ; RV64-NEXT:    vmerge.vim v8, v8, -1, v0
 ; RV64-NEXT:    ret
-  %ins1 = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
-  %splat1 = shufflevector <vscale x 1 x i16> %ins1, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %urem = urem <vscale x 1 x i16> %x, %splat1
-  %ins2 = insertelement <vscale x 1 x i16> poison, i16 1, i32 0
-  %splat2 = shufflevector <vscale x 1 x i16> %ins2, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ne <vscale x 1 x i16> %urem, %splat2
+  %urem = urem <vscale x 1 x i16> %x, splat (i16 5)
+  %cmp = icmp ne <vscale x 1 x i16> %urem, splat (i16 1)
   %ext = sext <vscale x 1 x i1> %cmp to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %ext
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
index 1cf57371455cfb..5b14014a252f12 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
@@ -12,9 +12,7 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_floor(<vscale x 8 x i8> %x, <vscale x
   %xzv = zext <vscale x 8 x i8> %x to <vscale x 8 x i16>
   %yzv = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i16> %add, %splat
+  %div = lshr <vscale x 8 x i16> %add, splat (i16 1)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -31,9 +29,7 @@ define <vscale x 8 x i8> @vaaddu_vx_nxv8i8_floor(<vscale x 8 x i8> %x, i8 %y) {
   %ysplat = shufflevector <vscale x 8 x i8> %yhead, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %yzv = zext <vscale x 8 x i8> %ysplat to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i16> %add, %splat
+  %div = lshr <vscale x 8 x i16> %add, splat (i16 1)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -48,9 +44,7 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_floor_sexti16(<vscale x 8 x i8> %x, <
   %xzv = sext <vscale x 8 x i8> %x to <vscale x 8 x i16>
   %yzv = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i16> %add, %splat
+  %div = lshr <vscale x 8 x i16> %add, splat (i16 1)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -65,9 +59,7 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_floor_zexti32(<vscale x 8 x i8> %x, <
   %xzv = zext <vscale x 8 x i8> %x to <vscale x 8 x i32>
   %yzv = zext <vscale x 8 x i8> %y to <vscale x 8 x i32>
   %add = add nuw nsw <vscale x 8 x i32> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %one, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i32> %add, %splat
+  %div = lshr <vscale x 8 x i32> %add, splat (i32 1)
   %ret = trunc <vscale x 8 x i32> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -82,9 +74,7 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_floor_lshr2(<vscale x 8 x i8> %x, <vs
   %xzv = zext <vscale x 8 x i8> %x to <vscale x 8 x i16>
   %yzv = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i16> %add, %splat
+  %div = lshr <vscale x 8 x i16> %add, splat (i16 2)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -99,9 +89,7 @@ define <vscale x 8 x i16> @vaaddu_vv_nxv8i16_floor(<vscale x 8 x i16> %x, <vscal
   %xzv = zext <vscale x 8 x i16> %x to <vscale x 8 x i32>
   %yzv = zext <vscale x 8 x i16> %y to <vscale x 8 x i32>
   %add = add nuw nsw <vscale x 8 x i32> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %one, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i32> %add, %splat
+  %div = lshr <vscale x 8 x i32> %add, splat (i32 1)
   %ret = trunc <vscale x 8 x i32> %div to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %ret
 }
@@ -118,9 +106,7 @@ define <vscale x 8 x i16> @vaaddu_vx_nxv8i16_floor(<vscale x 8 x i16> %x, i16 %y
   %ysplat = shufflevector <vscale x 8 x i16> %yhead, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
   %yzv = zext <vscale x 8 x i16> %ysplat to <vscale x 8 x i32>
   %add = add nuw nsw <vscale x 8 x i32> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %one, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i32> %add, %splat
+  %div = lshr <vscale x 8 x i32> %add, splat (i32 1)
   %ret = trunc <vscale x 8 x i32> %div to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %ret
 }
@@ -135,9 +121,7 @@ define <vscale x 8 x i32> @vaaddu_vv_nxv8i32_floor(<vscale x 8 x i32> %x, <vscal
   %xzv = zext <vscale x 8 x i32> %x to <vscale x 8 x i64>
   %yzv = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
   %add = add nuw nsw <vscale x 8 x i64> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i64> poison, i64 1, i64 0
-  %splat = shufflevector <vscale x 8 x i64> %one, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i64> %add, %splat
+  %div = lshr <vscale x 8 x i64> %add, splat (i64 1)
   %ret = trunc <vscale x 8 x i64> %div to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %ret
 }
@@ -154,9 +138,7 @@ define <vscale x 8 x i32> @vaaddu_vx_nxv8i32_floor(<vscale x 8 x i32> %x, i32 %y
   %ysplat = shufflevector <vscale x 8 x i32> %yhead, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
   %yzv = zext <vscale x 8 x i32> %ysplat to <vscale x 8 x i64>
   %add = add nuw nsw <vscale x 8 x i64> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i64> poison, i64 1, i64 0
-  %splat = shufflevector <vscale x 8 x i64> %one, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i64> %add, %splat
+  %div = lshr <vscale x 8 x i64> %add, splat (i64 1)
   %ret = trunc <vscale x 8 x i64> %div to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %ret
 }
@@ -171,9 +153,7 @@ define <vscale x 8 x i64> @vaaddu_vv_nxv8i64_floor(<vscale x 8 x i64> %x, <vscal
   %xzv = zext <vscale x 8 x i64> %x to <vscale x 8 x i128>
   %yzv = zext <vscale x 8 x i64> %y to <vscale x 8 x i128>
   %add = add nuw nsw <vscale x 8 x i128> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i128> poison, i128 1, i128 0
-  %splat = shufflevector <vscale x 8 x i128> %one, <vscale x 8 x i128> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i128> %add, %splat
+  %div = lshr <vscale x 8 x i128> %add, splat (i128 1)
   %ret = trunc <vscale x 8 x i128> %div to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %ret
 }
@@ -204,9 +184,7 @@ define <vscale x 8 x i64> @vaaddu_vx_nxv8i64_floor(<vscale x 8 x i64> %x, i64 %y
   %ysplat = shufflevector <vscale x 8 x i64> %yhead, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %yzv = zext <vscale x 8 x i64> %ysplat to <vscale x 8 x i128>
   %add = add nuw nsw <vscale x 8 x i128> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i128> poison, i128 1, i128 0
-  %splat = shufflevector <vscale x 8 x i128> %one, <vscale x 8 x i128> poison, <vscale x 8 x i32> zeroinitializer
-  %div = lshr <vscale x 8 x i128> %add, %splat
+  %div = lshr <vscale x 8 x i128> %add, splat (i128 1)
   %ret = trunc <vscale x 8 x i128> %div to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %ret
 }
@@ -221,10 +199,8 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_ceil(<vscale x 8 x i8> %x, <vscale x
   %xzv = zext <vscale x 8 x i8> %x to <vscale x 8 x i16>
   %yzv = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i16> %add, %splat
-  %div = lshr <vscale x 8 x i16> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i16> %add, splat (i16 1)
+  %div = lshr <vscale x 8 x i16> %add1, splat (i16 1)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -241,10 +217,8 @@ define <vscale x 8 x i8> @vaaddu_vx_nxv8i8_ceil(<vscale x 8 x i8> %x, i8 %y) {
   %ysplat = shufflevector <vscale x 8 x i8> %yhead, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
   %yzv = zext <vscale x 8 x i8> %ysplat to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i16> %add, %splat
-  %div = lshr <vscale x 8 x i16> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i16> %add, splat (i16 1)
+  %div = lshr <vscale x 8 x i16> %add1, splat (i16 1)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -262,10 +236,8 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_ceil_sexti16(<vscale x 8 x i8> %x, <v
   %xzv = sext <vscale x 8 x i8> %x to <vscale x 8 x i16>
   %yzv = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i16> %add, %splat
-  %div = lshr <vscale x 8 x i16> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i16> %add, splat (i16 1)
+  %div = lshr <vscale x 8 x i16> %add1, splat (i16 1)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -280,10 +252,8 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_ceil_zexti32(<vscale x 8 x i8> %x, <v
   %xzv = zext <vscale x 8 x i8> %x to <vscale x 8 x i32>
   %yzv = zext <vscale x 8 x i8> %y to <vscale x 8 x i32>
   %add = add nuw nsw <vscale x 8 x i32> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %one, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i32> %add, %splat
-  %div = lshr <vscale x 8 x i32> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i32> %add, splat (i32 1)
+  %div = lshr <vscale x 8 x i32> %add1, splat (i32 1)
   %ret = trunc <vscale x 8 x i32> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -301,10 +271,8 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_ceil_lshr2(<vscale x 8 x i8> %x, <vsc
   %xzv = zext <vscale x 8 x i8> %x to <vscale x 8 x i16>
   %yzv = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i16> %add, %splat
-  %div = lshr <vscale x 8 x i16> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i16> %add, splat (i16 2)
+  %div = lshr <vscale x 8 x i16> %add1, splat (i16 2)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -322,12 +290,8 @@ define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_ceil_add2(<vscale x 8 x i8> %x, <vsca
   %xzv = zext <vscale x 8 x i8> %x to <vscale x 8 x i16>
   %yzv = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
   %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat1 = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %two = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat2 = shufflevector <vscale x 8 x i16> %two, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %add2 = add nuw nsw <vscale x 8 x i16> %add, %splat2
-  %div = lshr <vscale x 8 x i16> %add2, %splat1
+  %add2 = add nuw nsw <vscale x 8 x i16> %add, splat (i16 2)
+  %div = lshr <vscale x 8 x i16> %add2, splat (i16 2)
   %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %ret
 }
@@ -342,10 +306,8 @@ define <vscale x 8 x i16> @vaaddu_vv_nxv8i16_ceil(<vscale x 8 x i16> %x, <vscale
   %xzv = zext <vscale x 8 x i16> %x to <vscale x 8 x i32>
   %yzv = zext <vscale x 8 x i16> %y to <vscale x 8 x i32>
   %add = add nuw nsw <vscale x 8 x i32> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %one, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i32> %add, %splat
-  %div = lshr <vscale x 8 x i32> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i32> %add, splat (i32 1)
+  %div = lshr <vscale x 8 x i32> %add1, splat (i32 1)
   %ret = trunc <vscale x 8 x i32> %div to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %ret
 }
@@ -362,10 +324,8 @@ define <vscale x 8 x i16> @vaaddu_vx_nxv8i16_ceil(<vscale x 8 x i16> %x, i16 %y)
   %ysplat = shufflevector <vscale x 8 x i16> %yhead, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
   %yzv = zext <vscale x 8 x i16> %ysplat to <vscale x 8 x i32>
   %add = add nuw nsw <vscale x 8 x i32> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %one, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i32> %add, %splat
-  %div = lshr <vscale x 8 x i32> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i32> %add, splat (i32 1)
+  %div = lshr <vscale x 8 x i32> %add1, splat (i32 1)
   %ret = trunc <vscale x 8 x i32> %div to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %ret
 }
@@ -380,10 +340,8 @@ define <vscale x 8 x i32> @vaaddu_vv_nxv8i32_ceil(<vscale x 8 x i32> %x, <vscale
   %xzv = zext <vscale x 8 x i32> %x to <vscale x 8 x i64>
   %yzv = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
   %add = add nuw nsw <vscale x 8 x i64> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i64> poison, i64 1, i64 0
-  %splat = shufflevector <vscale x 8 x i64> %one, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i64> %add, %splat
-  %div = lshr <vscale x 8 x i64> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i64> %add, splat (i64 1)
+  %div = lshr <vscale x 8 x i64> %add1, splat (i64 1)
   %ret = trunc <vscale x 8 x i64> %div to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %ret
 }
@@ -400,10 +358,8 @@ define <vscale x 8 x i32> @vaaddu_vx_nxv8i32_ceil(<vscale x 8 x i32> %x, i32 %y)
   %ysplat = shufflevector <vscale x 8 x i32> %yhead, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
   %yzv = zext <vscale x 8 x i32> %ysplat to <vscale x 8 x i64>
   %add = add nuw nsw <vscale x 8 x i64> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i64> poison, i64 1, i64 0
-  %splat = shufflevector <vscale x 8 x i64> %one, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i64> %add, %splat
-  %div = lshr <vscale x 8 x i64> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i64> %add, splat (i64 1)
+  %div = lshr <vscale x 8 x i64> %add1, splat (i64 1)
   %ret = trunc <vscale x 8 x i64> %div to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %ret
 }
@@ -418,10 +374,8 @@ define <vscale x 8 x i64> @vaaddu_vv_nxv8i64_ceil(<vscale x 8 x i64> %x, <vscale
   %xzv = zext <vscale x 8 x i64> %x to <vscale x 8 x i128>
   %yzv = zext <vscale x 8 x i64> %y to <vscale x 8 x i128>
   %add = add nuw nsw <vscale x 8 x i128> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i128> poison, i128 1, i128 0
-  %splat = shufflevector <vscale x 8 x i128> %one, <vscale x 8 x i128> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i128> %add, %splat
-  %div = lshr <vscale x 8 x i128> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i128> %add, splat (i128 1)
+  %div = lshr <vscale x 8 x i128> %add1, splat (i128 1)
   %ret = trunc <vscale x 8 x i128> %div to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %ret
 }
@@ -452,10 +406,8 @@ define <vscale x 8 x i64> @vaaddu_vx_nxv8i64_ceil(<vscale x 8 x i64> %x, i64 %y)
   %ysplat = shufflevector <vscale x 8 x i64> %yhead, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %yzv = zext <vscale x 8 x i64> %ysplat to <vscale x 8 x i128>
   %add = add nuw nsw <vscale x 8 x i128> %xzv, %yzv
-  %one = insertelement <vscale x 8 x i128> poison, i128 1, i128 0
-  %splat = shufflevector <vscale x 8 x i128> %one, <vscale x 8 x i128> poison, <vscale x 8 x i32> zeroinitializer
-  %add1 = add nuw nsw <vscale x 8 x i128> %add, %splat
-  %div = lshr <vscale x 8 x i128> %add1, %splat
+  %add1 = add nuw nsw <vscale x 8 x i128> %add, splat (i128 1)
+  %div = lshr <vscale x 8 x i128> %add1, splat (i128 1)
   %ret = trunc <vscale x 8 x i128> %div to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %ret
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
index 264cf2e8df096a..27fceb0112ae32 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
@@ -20,9 +20,7 @@ define <vscale x 1 x i8> @vadd_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i8> %va, %splat
+  %vc = add <vscale x 1 x i8> %va, splat (i8 -1)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -32,9 +30,7 @@ define <vscale x 1 x i8> @vadd_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 2, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i8> %va, %splat
+  %vc = add <vscale x 1 x i8> %va, splat (i8 2)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -45,11 +41,7 @@ define <vscale x 1 x i8> @vadd_ii_nxv1i8_1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 5
 ; CHECK-NEXT:    ret
-  %heada = insertelement <vscale x 1 x i8> poison, i8 2, i32 0
-  %splata = shufflevector <vscale x 1 x i8> %heada, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %headb = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
-  %splatb = shufflevector <vscale x 1 x i8> %headb, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i8> %splata, %splatb
+  %vc = add <vscale x 1 x i8> splat (i8 2), splat (i8 3)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -71,9 +63,7 @@ define <vscale x 2 x i8> @vadd_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = add <vscale x 2 x i8> %va, %splat
+  %vc = add <vscale x 2 x i8> %va, splat (i8 -1)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -83,9 +73,7 @@ define <vscale x 2 x i8> @vadd_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 2, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = add <vscale x 2 x i8> %va, %splat
+  %vc = add <vscale x 2 x i8> %va, splat (i8 2)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -107,9 +95,7 @@ define <vscale x 4 x i8> @vadd_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = add <vscale x 4 x i8> %va, %splat
+  %vc = add <vscale x 4 x i8> %va, splat (i8 -1)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -119,9 +105,7 @@ define <vscale x 4 x i8> @vadd_vx_nxv4i8_1(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 2, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = add <vscale x 4 x i8> %va, %splat
+  %vc = add <vscale x 4 x i8> %va, splat (i8 2)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -143,9 +127,7 @@ define <vscale x 8 x i8> @vadd_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = add <vscale x 8 x i8> %va, %splat
+  %vc = add <vscale x 8 x i8> %va, splat (i8 -1)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -155,9 +137,7 @@ define <vscale x 8 x i8> @vadd_vx_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = add <vscale x 8 x i8> %va, %splat
+  %vc = add <vscale x 8 x i8> %va, splat (i8 2)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -179,9 +159,7 @@ define <vscale x 16 x i8> @vadd_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = add <vscale x 16 x i8> %va, %splat
+  %vc = add <vscale x 16 x i8> %va, splat (i8 -1)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -191,9 +169,7 @@ define <vscale x 16 x i8> @vadd_vx_nxv16i8_1(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 2, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = add <vscale x 16 x i8> %va, %splat
+  %vc = add <vscale x 16 x i8> %va, splat (i8 2)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -215,9 +191,7 @@ define <vscale x 32 x i8> @vadd_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = add <vscale x 32 x i8> %va, %splat
+  %vc = add <vscale x 32 x i8> %va, splat (i8 -1)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -227,9 +201,7 @@ define <vscale x 32 x i8> @vadd_vx_nxv32i8_1(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 2, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = add <vscale x 32 x i8> %va, %splat
+  %vc = add <vscale x 32 x i8> %va, splat (i8 2)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -251,9 +223,7 @@ define <vscale x 64 x i8> @vadd_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = add <vscale x 64 x i8> %va, %splat
+  %vc = add <vscale x 64 x i8> %va, splat (i8 -1)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -263,9 +233,7 @@ define <vscale x 64 x i8> @vadd_vx_nxv64i8_1(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 2, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = add <vscale x 64 x i8> %va, %splat
+  %vc = add <vscale x 64 x i8> %va, splat (i8 2)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -287,9 +255,7 @@ define <vscale x 1 x i16> @vadd_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i16> %va, %splat
+  %vc = add <vscale x 1 x i16> %va, splat (i16 -1)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -299,9 +265,7 @@ define <vscale x 1 x i16> @vadd_vx_nxv1i16_1(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i16> %va, %splat
+  %vc = add <vscale x 1 x i16> %va, splat (i16 2)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -323,9 +287,7 @@ define <vscale x 2 x i16> @vadd_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = add <vscale x 2 x i16> %va, %splat
+  %vc = add <vscale x 2 x i16> %va, splat (i16 -1)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -335,9 +297,7 @@ define <vscale x 2 x i16> @vadd_vx_nxv2i16_1(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = add <vscale x 2 x i16> %va, %splat
+  %vc = add <vscale x 2 x i16> %va, splat (i16 2)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -359,9 +319,7 @@ define <vscale x 4 x i16> @vadd_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = add <vscale x 4 x i16> %va, %splat
+  %vc = add <vscale x 4 x i16> %va, splat (i16 -1)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -371,9 +329,7 @@ define <vscale x 4 x i16> @vadd_vx_nxv4i16_1(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = add <vscale x 4 x i16> %va, %splat
+  %vc = add <vscale x 4 x i16> %va, splat (i16 2)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -395,9 +351,7 @@ define <vscale x 8 x i16> @vadd_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = add <vscale x 8 x i16> %va, %splat
+  %vc = add <vscale x 8 x i16> %va, splat (i16 -1)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -407,9 +361,7 @@ define <vscale x 8 x i16> @vadd_vx_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = add <vscale x 8 x i16> %va, %splat
+  %vc = add <vscale x 8 x i16> %va, splat (i16 2)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -431,9 +383,7 @@ define <vscale x 16 x i16> @vadd_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = add <vscale x 16 x i16> %va, %splat
+  %vc = add <vscale x 16 x i16> %va, splat (i16 -1)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -443,9 +393,7 @@ define <vscale x 16 x i16> @vadd_vx_nxv16i16_1(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = add <vscale x 16 x i16> %va, %splat
+  %vc = add <vscale x 16 x i16> %va, splat (i16 2)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -467,9 +415,7 @@ define <vscale x 32 x i16> @vadd_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = add <vscale x 32 x i16> %va, %splat
+  %vc = add <vscale x 32 x i16> %va, splat (i16 -1)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -479,9 +425,7 @@ define <vscale x 32 x i16> @vadd_vx_nxv32i16_1(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 2, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = add <vscale x 32 x i16> %va, %splat
+  %vc = add <vscale x 32 x i16> %va, splat (i16 2)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -503,9 +447,7 @@ define <vscale x 1 x i32> @vadd_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i32> %va, %splat
+  %vc = add <vscale x 1 x i32> %va, splat (i32 -1)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -515,9 +457,7 @@ define <vscale x 1 x i32> @vadd_vx_nxv1i32_1(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 2, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i32> %va, %splat
+  %vc = add <vscale x 1 x i32> %va, splat (i32 2)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -539,9 +479,7 @@ define <vscale x 2 x i32> @vadd_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = add <vscale x 2 x i32> %va, %splat
+  %vc = add <vscale x 2 x i32> %va, splat (i32 -1)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -551,9 +489,7 @@ define <vscale x 2 x i32> @vadd_vx_nxv2i32_1(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 2, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = add <vscale x 2 x i32> %va, %splat
+  %vc = add <vscale x 2 x i32> %va, splat (i32 2)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -575,9 +511,7 @@ define <vscale x 4 x i32> @vadd_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = add <vscale x 4 x i32> %va, %splat
+  %vc = add <vscale x 4 x i32> %va, splat (i32 -1)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -587,9 +521,7 @@ define <vscale x 4 x i32> @vadd_vx_nxv4i32_1(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = add <vscale x 4 x i32> %va, %splat
+  %vc = add <vscale x 4 x i32> %va, splat (i32 2)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -611,9 +543,7 @@ define <vscale x 8 x i32> @vadd_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = add <vscale x 8 x i32> %va, %splat
+  %vc = add <vscale x 8 x i32> %va, splat (i32 -1)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -623,9 +553,7 @@ define <vscale x 8 x i32> @vadd_vx_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 2, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = add <vscale x 8 x i32> %va, %splat
+  %vc = add <vscale x 8 x i32> %va, splat (i32 2)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -647,9 +575,7 @@ define <vscale x 16 x i32> @vadd_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = add <vscale x 16 x i32> %va, %splat
+  %vc = add <vscale x 16 x i32> %va, splat (i32 -1)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -659,9 +585,7 @@ define <vscale x 16 x i32> @vadd_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 2, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = add <vscale x 16 x i32> %va, %splat
+  %vc = add <vscale x 16 x i32> %va, splat (i32 2)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -696,9 +620,7 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i64> %va, %splat
+  %vc = add <vscale x 1 x i64> %va, splat (i64 -1)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -708,9 +630,7 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = add <vscale x 1 x i64> %va, %splat
+  %vc = add <vscale x 1 x i64> %va, splat (i64 2)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -745,9 +665,7 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = add <vscale x 2 x i64> %va, %splat
+  %vc = add <vscale x 2 x i64> %va, splat (i64 -1)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -757,9 +675,7 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = add <vscale x 2 x i64> %va, %splat
+  %vc = add <vscale x 2 x i64> %va, splat (i64 2)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -794,9 +710,7 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = add <vscale x 4 x i64> %va, %splat
+  %vc = add <vscale x 4 x i64> %va, splat (i64 -1)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -806,9 +720,7 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = add <vscale x 4 x i64> %va, %splat
+  %vc = add <vscale x 4 x i64> %va, splat (i64 2)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -843,9 +755,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = add <vscale x 8 x i64> %va, %splat
+  %vc = add <vscale x 8 x i64> %va, splat (i64 -1)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -855,9 +765,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = add <vscale x 8 x i64> %va, %splat
+  %vc = add <vscale x 8 x i64> %va, splat (i64 2)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -922,9 +830,7 @@ define <vscale x 8 x i32> @vadd_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> zeroinitializer
   %vc = add <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -937,9 +843,7 @@ define <vscale x 8 x i32> @vadd_vv_mask_negative0_nxv8i32(<vscale x 8 x i32> %va
 ; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 1)
   %vc = add <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 8b65c1a70206e1..4b5e737d22eb83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -36,9 +36,7 @@ define <vscale x 1 x i8> @vadd_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -74,9 +72,7 @@ define <vscale x 1 x i8> @vadd_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -86,9 +82,7 @@ define <vscale x 1 x i8> @vadd_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -98,11 +92,7 @@ define <vscale x 1 x i8> @vadd_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -124,9 +114,7 @@ define <vscale x 2 x i8> @vadd_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -150,9 +138,7 @@ define <vscale x 2 x i8> @vadd_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -162,9 +148,7 @@ define <vscale x 2 x i8> @vadd_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -174,11 +158,7 @@ define <vscale x 2 x i8> @vadd_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -200,9 +180,7 @@ define <vscale x 3 x i8> @vadd_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -226,9 +204,7 @@ define <vscale x 3 x i8> @vadd_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -238,9 +214,7 @@ define <vscale x 3 x i8> @vadd_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> %m, i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -250,11 +224,7 @@ define <vscale x 3 x i8> @vadd_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -276,9 +246,7 @@ define <vscale x 4 x i8> @vadd_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -302,9 +270,7 @@ define <vscale x 4 x i8> @vadd_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -314,9 +280,7 @@ define <vscale x 4 x i8> @vadd_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -326,11 +290,7 @@ define <vscale x 4 x i8> @vadd_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -352,9 +312,7 @@ define <vscale x 8 x i8> @vadd_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -378,9 +336,7 @@ define <vscale x 8 x i8> @vadd_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -390,9 +346,7 @@ define <vscale x 8 x i8> @vadd_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -402,11 +356,7 @@ define <vscale x 8 x i8> @vadd_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -428,9 +378,7 @@ define <vscale x 16 x i8> @vadd_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -454,9 +402,7 @@ define <vscale x 16 x i8> @vadd_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -466,9 +412,7 @@ define <vscale x 16 x i8> @vadd_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -478,11 +422,7 @@ define <vscale x 16 x i8> @vadd_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -504,9 +444,7 @@ define <vscale x 32 x i8> @vadd_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -530,9 +468,7 @@ define <vscale x 32 x i8> @vadd_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -542,9 +478,7 @@ define <vscale x 32 x i8> @vadd_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -554,11 +488,7 @@ define <vscale x 32 x i8> @vadd_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -580,9 +510,7 @@ define <vscale x 64 x i8> @vadd_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -606,9 +534,7 @@ define <vscale x 64 x i8> @vadd_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -618,9 +544,7 @@ define <vscale x 64 x i8> @vadd_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -630,11 +554,7 @@ define <vscale x 64 x i8> @vadd_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -664,9 +584,7 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -688,11 +606,7 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -714,9 +628,7 @@ define <vscale x 1 x i16> @vadd_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -740,9 +652,7 @@ define <vscale x 1 x i16> @vadd_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -752,9 +662,7 @@ define <vscale x 1 x i16> @vadd_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -764,11 +672,7 @@ define <vscale x 1 x i16> @vadd_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -790,9 +694,7 @@ define <vscale x 2 x i16> @vadd_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -816,9 +718,7 @@ define <vscale x 2 x i16> @vadd_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -828,9 +728,7 @@ define <vscale x 2 x i16> @vadd_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -840,11 +738,7 @@ define <vscale x 2 x i16> @vadd_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -866,9 +760,7 @@ define <vscale x 4 x i16> @vadd_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -892,9 +784,7 @@ define <vscale x 4 x i16> @vadd_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -904,9 +794,7 @@ define <vscale x 4 x i16> @vadd_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -916,11 +804,7 @@ define <vscale x 4 x i16> @vadd_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -942,9 +826,7 @@ define <vscale x 8 x i16> @vadd_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -968,9 +850,7 @@ define <vscale x 8 x i16> @vadd_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -980,9 +860,7 @@ define <vscale x 8 x i16> @vadd_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -992,11 +870,7 @@ define <vscale x 8 x i16> @vadd_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1018,9 +892,7 @@ define <vscale x 16 x i16> @vadd_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1044,9 +916,7 @@ define <vscale x 16 x i16> @vadd_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1056,9 +926,7 @@ define <vscale x 16 x i16> @vadd_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1068,11 +936,7 @@ define <vscale x 16 x i16> @vadd_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1094,9 +958,7 @@ define <vscale x 32 x i16> @vadd_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1120,9 +982,7 @@ define <vscale x 32 x i16> @vadd_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1132,9 +992,7 @@ define <vscale x 32 x i16> @vadd_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1144,11 +1002,7 @@ define <vscale x 32 x i16> @vadd_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1170,9 +1024,7 @@ define <vscale x 1 x i32> @vadd_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1196,9 +1048,7 @@ define <vscale x 1 x i32> @vadd_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1208,9 +1058,7 @@ define <vscale x 1 x i32> @vadd_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1220,11 +1068,7 @@ define <vscale x 1 x i32> @vadd_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1246,9 +1090,7 @@ define <vscale x 2 x i32> @vadd_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1272,9 +1114,7 @@ define <vscale x 2 x i32> @vadd_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1284,9 +1124,7 @@ define <vscale x 2 x i32> @vadd_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1296,11 +1134,7 @@ define <vscale x 2 x i32> @vadd_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1322,9 +1156,7 @@ define <vscale x 4 x i32> @vadd_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1348,9 +1180,7 @@ define <vscale x 4 x i32> @vadd_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1360,9 +1190,7 @@ define <vscale x 4 x i32> @vadd_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1372,11 +1200,7 @@ define <vscale x 4 x i32> @vadd_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1398,9 +1222,7 @@ define <vscale x 8 x i32> @vadd_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1424,9 +1246,7 @@ define <vscale x 8 x i32> @vadd_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1436,9 +1256,7 @@ define <vscale x 8 x i32> @vadd_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1448,11 +1266,7 @@ define <vscale x 8 x i32> @vadd_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1474,9 +1288,7 @@ define <vscale x 16 x i32> @vadd_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1500,9 +1312,7 @@ define <vscale x 16 x i32> @vadd_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1512,9 +1322,7 @@ define <vscale x 16 x i32> @vadd_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1524,11 +1332,7 @@ define <vscale x 16 x i32> @vadd_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1559,9 +1363,7 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1583,11 +1385,7 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1621,11 +1419,9 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <v
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()
   %evl0 = mul i32 %evl, 8
-  %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
+  %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl0)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1659,11 +1455,9 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, <
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
   %evl = call i32 @llvm.vscale.i32()
   %evl0 = mul i32 %evl, 16
-  %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
+  %v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl0)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1685,9 +1479,7 @@ define <vscale x 1 x i64> @vadd_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1739,9 +1531,7 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1751,9 +1541,7 @@ define <vscale x 1 x i64> @vadd_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1763,11 +1551,7 @@ define <vscale x 1 x i64> @vadd_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1789,9 +1573,7 @@ define <vscale x 2 x i64> @vadd_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1843,9 +1625,7 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1855,9 +1635,7 @@ define <vscale x 2 x i64> @vadd_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1867,11 +1645,7 @@ define <vscale x 2 x i64> @vadd_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1893,9 +1667,7 @@ define <vscale x 4 x i64> @vadd_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1947,9 +1719,7 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1959,9 +1729,7 @@ define <vscale x 4 x i64> @vadd_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1971,11 +1739,7 @@ define <vscale x 4 x i64> @vadd_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1997,9 +1761,7 @@ define <vscale x 8 x i64> @vadd_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2051,9 +1813,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2063,9 +1823,7 @@ define <vscale x 8 x i64> @vadd_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2075,10 +1833,6 @@ define <vscale x 8 x i64> @vadd_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
index f10954bb02dfef..40d0d9aa9d1d6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
@@ -30,9 +30,7 @@ define <vscale x 1 x i8> @vand_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -10, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i8> %va, %splat
+  %vc = and <vscale x 1 x i8> %va, splat (i8 -10)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -42,9 +40,7 @@ define <vscale x 1 x i8> @vand_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i8> %va, %splat
+  %vc = and <vscale x 1 x i8> %va, splat (i8 8)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -55,9 +51,7 @@ define <vscale x 1 x i8> @vand_vi_nxv1i8_2(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i8> %va, %splat
+  %vc = and <vscale x 1 x i8> %va, splat (i8 16)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -89,9 +83,7 @@ define <vscale x 2 x i8> @vand_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -10, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i8> %va, %splat
+  %vc = and <vscale x 2 x i8> %va, splat (i8 -10)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -101,9 +93,7 @@ define <vscale x 2 x i8> @vand_vi_nxv2i8_1(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i8> %va, %splat
+  %vc = and <vscale x 2 x i8> %va, splat (i8 8)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -114,9 +104,7 @@ define <vscale x 2 x i8> @vand_vi_nxv2i8_2(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i8> %va, %splat
+  %vc = and <vscale x 2 x i8> %va, splat (i8 16)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -148,9 +136,7 @@ define <vscale x 4 x i8> @vand_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -10, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i8> %va, %splat
+  %vc = and <vscale x 4 x i8> %va, splat (i8 -10)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -160,9 +146,7 @@ define <vscale x 4 x i8> @vand_vi_nxv4i8_1(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i8> %va, %splat
+  %vc = and <vscale x 4 x i8> %va, splat (i8 8)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -173,9 +157,7 @@ define <vscale x 4 x i8> @vand_vi_nxv4i8_2(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i8> %va, %splat
+  %vc = and <vscale x 4 x i8> %va, splat (i8 16)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -207,9 +189,7 @@ define <vscale x 8 x i8> @vand_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -10, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i8> %va, %splat
+  %vc = and <vscale x 8 x i8> %va, splat (i8 -10)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -219,9 +199,7 @@ define <vscale x 8 x i8> @vand_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i8> %va, %splat
+  %vc = and <vscale x 8 x i8> %va, splat (i8 8)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -232,9 +210,7 @@ define <vscale x 8 x i8> @vand_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i8> %va, %splat
+  %vc = and <vscale x 8 x i8> %va, splat (i8 16)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -266,9 +242,7 @@ define <vscale x 16 x i8> @vand_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -10, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i8> %va, %splat
+  %vc = and <vscale x 16 x i8> %va, splat (i8 -10)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -278,9 +252,7 @@ define <vscale x 16 x i8> @vand_vi_nxv16i8_1(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i8> %va, %splat
+  %vc = and <vscale x 16 x i8> %va, splat (i8 8)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -291,9 +263,7 @@ define <vscale x 16 x i8> @vand_vi_nxv16i8_2(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i8> %va, %splat
+  %vc = and <vscale x 16 x i8> %va, splat (i8 16)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -325,9 +295,7 @@ define <vscale x 32 x i8> @vand_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -10, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = and <vscale x 32 x i8> %va, %splat
+  %vc = and <vscale x 32 x i8> %va, splat (i8 -10)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -337,9 +305,7 @@ define <vscale x 32 x i8> @vand_vi_nxv32i8_1(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = and <vscale x 32 x i8> %va, %splat
+  %vc = and <vscale x 32 x i8> %va, splat (i8 8)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -350,9 +316,7 @@ define <vscale x 32 x i8> @vand_vi_nxv32i8_2(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = and <vscale x 32 x i8> %va, %splat
+  %vc = and <vscale x 32 x i8> %va, splat (i8 16)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -384,9 +348,7 @@ define <vscale x 64 x i8> @vand_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -10, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = and <vscale x 64 x i8> %va, %splat
+  %vc = and <vscale x 64 x i8> %va, splat (i8 -10)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -396,9 +358,7 @@ define <vscale x 64 x i8> @vand_vi_nxv64i8_1(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = and <vscale x 64 x i8> %va, %splat
+  %vc = and <vscale x 64 x i8> %va, splat (i8 8)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -409,9 +369,7 @@ define <vscale x 64 x i8> @vand_vi_nxv64i8_2(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = and <vscale x 64 x i8> %va, %splat
+  %vc = and <vscale x 64 x i8> %va, splat (i8 16)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -443,9 +401,7 @@ define <vscale x 1 x i16> @vand_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -10, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i16> %va, %splat
+  %vc = and <vscale x 1 x i16> %va, splat (i16 -10)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -455,9 +411,7 @@ define <vscale x 1 x i16> @vand_vi_nxv1i16_1(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i16> %va, %splat
+  %vc = and <vscale x 1 x i16> %va, splat (i16 8)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -468,9 +422,7 @@ define <vscale x 1 x i16> @vand_vi_nxv1i16_2(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i16> %va, %splat
+  %vc = and <vscale x 1 x i16> %va, splat (i16 16)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -502,9 +454,7 @@ define <vscale x 2 x i16> @vand_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -10, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i16> %va, %splat
+  %vc = and <vscale x 2 x i16> %va, splat (i16 -10)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -514,9 +464,7 @@ define <vscale x 2 x i16> @vand_vi_nxv2i16_1(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i16> %va, %splat
+  %vc = and <vscale x 2 x i16> %va, splat (i16 8)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -527,9 +475,7 @@ define <vscale x 2 x i16> @vand_vi_nxv2i16_2(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i16> %va, %splat
+  %vc = and <vscale x 2 x i16> %va, splat (i16 16)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -561,9 +507,7 @@ define <vscale x 4 x i16> @vand_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -10, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i16> %va, %splat
+  %vc = and <vscale x 4 x i16> %va, splat (i16 -10)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -573,9 +517,7 @@ define <vscale x 4 x i16> @vand_vi_nxv4i16_1(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i16> %va, %splat
+  %vc = and <vscale x 4 x i16> %va, splat (i16 8)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -586,9 +528,7 @@ define <vscale x 4 x i16> @vand_vi_nxv4i16_2(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i16> %va, %splat
+  %vc = and <vscale x 4 x i16> %va, splat (i16 16)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -620,9 +560,7 @@ define <vscale x 8 x i16> @vand_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -10, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i16> %va, %splat
+  %vc = and <vscale x 8 x i16> %va, splat (i16 -10)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -632,9 +570,7 @@ define <vscale x 8 x i16> @vand_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i16> %va, %splat
+  %vc = and <vscale x 8 x i16> %va, splat (i16 8)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -645,9 +581,7 @@ define <vscale x 8 x i16> @vand_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i16> %va, %splat
+  %vc = and <vscale x 8 x i16> %va, splat (i16 16)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -679,9 +613,7 @@ define <vscale x 16 x i16> @vand_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -10, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i16> %va, %splat
+  %vc = and <vscale x 16 x i16> %va, splat (i16 -10)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -691,9 +623,7 @@ define <vscale x 16 x i16> @vand_vi_nxv16i16_1(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i16> %va, %splat
+  %vc = and <vscale x 16 x i16> %va, splat (i16 8)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -704,9 +634,7 @@ define <vscale x 16 x i16> @vand_vi_nxv16i16_2(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i16> %va, %splat
+  %vc = and <vscale x 16 x i16> %va, splat (i16 16)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -738,9 +666,7 @@ define <vscale x 32 x i16> @vand_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -10, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = and <vscale x 32 x i16> %va, %splat
+  %vc = and <vscale x 32 x i16> %va, splat (i16 -10)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -750,9 +676,7 @@ define <vscale x 32 x i16> @vand_vi_nxv32i16_1(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = and <vscale x 32 x i16> %va, %splat
+  %vc = and <vscale x 32 x i16> %va, splat (i16 8)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -763,9 +687,7 @@ define <vscale x 32 x i16> @vand_vi_nxv32i16_2(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = and <vscale x 32 x i16> %va, %splat
+  %vc = and <vscale x 32 x i16> %va, splat (i16 16)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -797,9 +719,7 @@ define <vscale x 1 x i32> @vand_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -10, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i32> %va, %splat
+  %vc = and <vscale x 1 x i32> %va, splat (i32 -10)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -809,9 +729,7 @@ define <vscale x 1 x i32> @vand_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i32> %va, %splat
+  %vc = and <vscale x 1 x i32> %va, splat (i32 8)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -822,9 +740,7 @@ define <vscale x 1 x i32> @vand_vi_nxv1i32_2(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i32> %va, %splat
+  %vc = and <vscale x 1 x i32> %va, splat (i32 16)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -856,9 +772,7 @@ define <vscale x 2 x i32> @vand_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -10, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i32> %va, %splat
+  %vc = and <vscale x 2 x i32> %va, splat (i32 -10)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -868,9 +782,7 @@ define <vscale x 2 x i32> @vand_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i32> %va, %splat
+  %vc = and <vscale x 2 x i32> %va, splat (i32 8)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -881,9 +793,7 @@ define <vscale x 2 x i32> @vand_vi_nxv2i32_2(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i32> %va, %splat
+  %vc = and <vscale x 2 x i32> %va, splat (i32 16)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -915,9 +825,7 @@ define <vscale x 4 x i32> @vand_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -10, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i32> %va, %splat
+  %vc = and <vscale x 4 x i32> %va, splat (i32 -10)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -927,9 +835,7 @@ define <vscale x 4 x i32> @vand_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i32> %va, %splat
+  %vc = and <vscale x 4 x i32> %va, splat (i32 8)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -940,9 +846,7 @@ define <vscale x 4 x i32> @vand_vi_nxv4i32_2(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i32> %va, %splat
+  %vc = and <vscale x 4 x i32> %va, splat (i32 16)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -974,9 +878,7 @@ define <vscale x 8 x i32> @vand_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -10, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i32> %va, %splat
+  %vc = and <vscale x 8 x i32> %va, splat (i32 -10)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -986,9 +888,7 @@ define <vscale x 8 x i32> @vand_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i32> %va, %splat
+  %vc = and <vscale x 8 x i32> %va, splat (i32 8)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -999,9 +899,7 @@ define <vscale x 8 x i32> @vand_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i32> %va, %splat
+  %vc = and <vscale x 8 x i32> %va, splat (i32 16)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -1033,9 +931,7 @@ define <vscale x 16 x i32> @vand_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -10, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i32> %va, %splat
+  %vc = and <vscale x 16 x i32> %va, splat (i32 -10)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -1045,9 +941,7 @@ define <vscale x 16 x i32> @vand_vi_nxv16i32_1(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i32> %va, %splat
+  %vc = and <vscale x 16 x i32> %va, splat (i32 8)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -1058,9 +952,7 @@ define <vscale x 16 x i32> @vand_vi_nxv16i32_2(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = and <vscale x 16 x i32> %va, %splat
+  %vc = and <vscale x 16 x i32> %va, splat (i32 16)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -1105,9 +997,7 @@ define <vscale x 1 x i64> @vand_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -10, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i64> %va, %splat
+  %vc = and <vscale x 1 x i64> %va, splat (i64 -10)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -1117,9 +1007,7 @@ define <vscale x 1 x i64> @vand_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 8, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i64> %va, %splat
+  %vc = and <vscale x 1 x i64> %va, splat (i64 8)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -1130,9 +1018,7 @@ define <vscale x 1 x i64> @vand_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = and <vscale x 1 x i64> %va, %splat
+  %vc = and <vscale x 1 x i64> %va, splat (i64 16)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -1177,9 +1063,7 @@ define <vscale x 2 x i64> @vand_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -10, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i64> %va, %splat
+  %vc = and <vscale x 2 x i64> %va, splat (i64 -10)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1189,9 +1073,7 @@ define <vscale x 2 x i64> @vand_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 8, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i64> %va, %splat
+  %vc = and <vscale x 2 x i64> %va, splat (i64 8)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1202,9 +1084,7 @@ define <vscale x 2 x i64> @vand_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = and <vscale x 2 x i64> %va, %splat
+  %vc = and <vscale x 2 x i64> %va, splat (i64 16)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1249,9 +1129,7 @@ define <vscale x 4 x i64> @vand_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -10, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i64> %va, %splat
+  %vc = and <vscale x 4 x i64> %va, splat (i64 -10)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1261,9 +1139,7 @@ define <vscale x 4 x i64> @vand_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 8, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i64> %va, %splat
+  %vc = and <vscale x 4 x i64> %va, splat (i64 8)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1274,9 +1150,7 @@ define <vscale x 4 x i64> @vand_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = and <vscale x 4 x i64> %va, %splat
+  %vc = and <vscale x 4 x i64> %va, splat (i64 16)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1321,9 +1195,7 @@ define <vscale x 8 x i64> @vand_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, -10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -10, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i64> %va, %splat
+  %vc = and <vscale x 8 x i64> %va, splat (i64 -10)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1333,9 +1205,7 @@ define <vscale x 8 x i64> @vand_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 8, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i64> %va, %splat
+  %vc = and <vscale x 8 x i64> %va, splat (i64 8)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1346,9 +1216,7 @@ define <vscale x 8 x i64> @vand_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = and <vscale x 8 x i64> %va, %splat
+  %vc = and <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1389,9 +1257,7 @@ define <vscale x 8 x i32> @vand_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> %allones
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 -1)
   %vc = and <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -1402,11 +1268,9 @@ define <vscale x 8 x i32> @vand_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 sign
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
   %head2 = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %allones
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> splat (i32 -1)
   %vc = and <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -1417,11 +1281,7 @@ define <vscale x 8 x i32> @vand_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vand.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head2 = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %allones
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> splat (i32 -1)
   %vc = and <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
index 032c3a014eca99..7b4a68d5867f99 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
@@ -36,9 +36,7 @@ define <vscale x 1 x i8> @vand_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -62,9 +60,7 @@ define <vscale x 1 x i8> @vand_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -74,9 +70,7 @@ define <vscale x 1 x i8> @vand_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -86,11 +80,7 @@ define <vscale x 1 x i8> @vand_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -112,9 +102,7 @@ define <vscale x 2 x i8> @vand_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -138,9 +126,7 @@ define <vscale x 2 x i8> @vand_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -150,9 +136,7 @@ define <vscale x 2 x i8> @vand_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 4), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -162,11 +146,7 @@ define <vscale x 2 x i8> @vand_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -188,9 +168,7 @@ define <vscale x 4 x i8> @vand_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -214,9 +192,7 @@ define <vscale x 4 x i8> @vand_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -226,9 +202,7 @@ define <vscale x 4 x i8> @vand_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 4), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -238,11 +212,7 @@ define <vscale x 4 x i8> @vand_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -264,9 +234,7 @@ define <vscale x 8 x i8> @vand_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -290,9 +258,7 @@ define <vscale x 8 x i8> @vand_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -302,9 +268,7 @@ define <vscale x 8 x i8> @vand_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -314,11 +278,7 @@ define <vscale x 8 x i8> @vand_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -340,9 +300,7 @@ define <vscale x 16 x i8> @vand_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -366,9 +324,7 @@ define <vscale x 16 x i8> @vand_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -378,9 +334,7 @@ define <vscale x 16 x i8> @vand_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 4), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -390,11 +344,7 @@ define <vscale x 16 x i8> @vand_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -416,9 +366,7 @@ define <vscale x 32 x i8> @vand_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -442,9 +390,7 @@ define <vscale x 32 x i8> @vand_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -454,9 +400,7 @@ define <vscale x 32 x i8> @vand_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 4), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -466,11 +410,7 @@ define <vscale x 32 x i8> @vand_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 4), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -492,9 +432,7 @@ define <vscale x 64 x i8> @vand_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -518,9 +456,7 @@ define <vscale x 64 x i8> @vand_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -530,9 +466,7 @@ define <vscale x 64 x i8> @vand_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 4), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -542,11 +476,7 @@ define <vscale x 64 x i8> @vand_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 4), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -568,9 +498,7 @@ define <vscale x 1 x i16> @vand_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -594,9 +522,7 @@ define <vscale x 1 x i16> @vand_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -606,9 +532,7 @@ define <vscale x 1 x i16> @vand_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 4), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -618,11 +542,7 @@ define <vscale x 1 x i16> @vand_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -644,9 +564,7 @@ define <vscale x 2 x i16> @vand_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -670,9 +588,7 @@ define <vscale x 2 x i16> @vand_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -682,9 +598,7 @@ define <vscale x 2 x i16> @vand_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 4), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -694,11 +608,7 @@ define <vscale x 2 x i16> @vand_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -720,9 +630,7 @@ define <vscale x 4 x i16> @vand_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -746,9 +654,7 @@ define <vscale x 4 x i16> @vand_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -758,9 +664,7 @@ define <vscale x 4 x i16> @vand_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 4), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -770,11 +674,7 @@ define <vscale x 4 x i16> @vand_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -796,9 +696,7 @@ define <vscale x 8 x i16> @vand_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -822,9 +720,7 @@ define <vscale x 8 x i16> @vand_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -834,9 +730,7 @@ define <vscale x 8 x i16> @vand_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 4), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -846,11 +740,7 @@ define <vscale x 8 x i16> @vand_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -872,9 +762,7 @@ define <vscale x 14 x i16> @vand_vv_nxv14i16_unmasked(<vscale x 14 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 14 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> poison, <vscale x 14 x i32> zeroinitializer
-  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 %evl)
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 14 x i16> %v
 }
 
@@ -898,9 +786,7 @@ define <vscale x 14 x i16> @vand_vx_nxv14i16_unmasked(<vscale x 14 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 14 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> poison, <vscale x 14 x i32> zeroinitializer
-  %head = insertelement <vscale x 14 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> poison, <vscale x 14 x i32> zeroinitializer
-  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 14 x i16> %v
 }
 
@@ -910,9 +796,7 @@ define <vscale x 14 x i16> @vand_vi_nxv14i16(<vscale x 14 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 14 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> poison, <vscale x 14 x i32> zeroinitializer
-  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> splat (i16 4), <vscale x 14 x i1> %m, i32 %evl)
   ret <vscale x 14 x i16> %v
 }
 
@@ -922,11 +806,7 @@ define <vscale x 14 x i16> @vand_vi_nxv14i16_unmasked(<vscale x 14 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 14 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> poison, <vscale x 14 x i32> zeroinitializer
-  %head = insertelement <vscale x 14 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> poison, <vscale x 14 x i32> zeroinitializer
-  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> splat (i16 4), <vscale x 14 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 14 x i16> %v
 }
 
@@ -948,9 +828,7 @@ define <vscale x 16 x i16> @vand_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -974,9 +852,7 @@ define <vscale x 16 x i16> @vand_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -986,9 +862,7 @@ define <vscale x 16 x i16> @vand_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 4), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -998,11 +872,7 @@ define <vscale x 16 x i16> @vand_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1024,9 +894,7 @@ define <vscale x 32 x i16> @vand_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1062,9 +930,7 @@ define <vscale x 32 x i16> @vand_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1074,9 +940,7 @@ define <vscale x 32 x i16> @vand_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 4), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1086,11 +950,7 @@ define <vscale x 32 x i16> @vand_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 4), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1112,9 +972,7 @@ define <vscale x 1 x i32> @vand_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1138,9 +996,7 @@ define <vscale x 1 x i32> @vand_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1150,9 +1006,7 @@ define <vscale x 1 x i32> @vand_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1162,11 +1016,7 @@ define <vscale x 1 x i32> @vand_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1188,9 +1038,7 @@ define <vscale x 2 x i32> @vand_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1214,9 +1062,7 @@ define <vscale x 2 x i32> @vand_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1226,9 +1072,7 @@ define <vscale x 2 x i32> @vand_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 4), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1238,11 +1082,7 @@ define <vscale x 2 x i32> @vand_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1264,9 +1104,7 @@ define <vscale x 4 x i32> @vand_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1290,9 +1128,7 @@ define <vscale x 4 x i32> @vand_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1302,9 +1138,7 @@ define <vscale x 4 x i32> @vand_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 4), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1314,11 +1148,7 @@ define <vscale x 4 x i32> @vand_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1340,9 +1170,7 @@ define <vscale x 8 x i32> @vand_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1366,9 +1194,7 @@ define <vscale x 8 x i32> @vand_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1378,9 +1204,7 @@ define <vscale x 8 x i32> @vand_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1390,11 +1214,7 @@ define <vscale x 8 x i32> @vand_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1416,9 +1236,7 @@ define <vscale x 16 x i32> @vand_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1442,9 +1260,7 @@ define <vscale x 16 x i32> @vand_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1454,9 +1270,7 @@ define <vscale x 16 x i32> @vand_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 4), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1466,11 +1280,7 @@ define <vscale x 16 x i32> @vand_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1492,9 +1302,7 @@ define <vscale x 1 x i64> @vand_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1546,9 +1354,7 @@ define <vscale x 1 x i64> @vand_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1558,9 +1364,7 @@ define <vscale x 1 x i64> @vand_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1570,11 +1374,7 @@ define <vscale x 1 x i64> @vand_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1596,9 +1396,7 @@ define <vscale x 2 x i64> @vand_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1650,9 +1448,7 @@ define <vscale x 2 x i64> @vand_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1662,9 +1458,7 @@ define <vscale x 2 x i64> @vand_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 4), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1674,11 +1468,7 @@ define <vscale x 2 x i64> @vand_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1700,9 +1490,7 @@ define <vscale x 4 x i64> @vand_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1754,9 +1542,7 @@ define <vscale x 4 x i64> @vand_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1766,9 +1552,7 @@ define <vscale x 4 x i64> @vand_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 4), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1778,11 +1562,7 @@ define <vscale x 4 x i64> @vand_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1804,9 +1584,7 @@ define <vscale x 8 x i64> @vand_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1858,9 +1636,7 @@ define <vscale x 8 x i64> @vand_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1870,9 +1646,7 @@ define <vscale x 8 x i64> @vand_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1882,10 +1656,6 @@ define <vscale x 8 x i64> @vand_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
index f9c53c93472a62..f25a3f937f1b4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -17,9 +17,7 @@ define <vscale x 1 x i8> @vandn_vv_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %a = xor <vscale x 1 x i8> %x, %splat
+  %a = xor <vscale x 1 x i8> %x, splat (i8 -1)
   %b = and <vscale x 1 x i8> %a, %y
   ret <vscale x 1 x i8> %b
 }
@@ -37,9 +35,7 @@ define <vscale x 1 x i8> @vandn_vv_swapped_nxv1i8(<vscale x 1 x i8> %x, <vscale
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %a = xor <vscale x 1 x i8> %x, %splat
+  %a = xor <vscale x 1 x i8> %x, splat (i8 -1)
   %b = and <vscale x 1 x i8> %y, %a
   ret <vscale x 1 x i8> %b
 }
@@ -97,9 +93,7 @@ define <vscale x 2 x i8> @vandn_vv_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %a = xor <vscale x 2 x i8> %x, %splat
+  %a = xor <vscale x 2 x i8> %x, splat (i8 -1)
   %b = and <vscale x 2 x i8> %a, %y
   ret <vscale x 2 x i8> %b
 }
@@ -117,9 +111,7 @@ define <vscale x 2 x i8> @vandn_vv_swapped_nxv2i8(<vscale x 2 x i8> %x, <vscale
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %a = xor <vscale x 2 x i8> %x, %splat
+  %a = xor <vscale x 2 x i8> %x, splat (i8 -1)
   %b = and <vscale x 2 x i8> %y, %a
   ret <vscale x 2 x i8> %b
 }
@@ -177,9 +169,7 @@ define <vscale x 4 x i8> @vandn_vv_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %a = xor <vscale x 4 x i8> %x, %splat
+  %a = xor <vscale x 4 x i8> %x, splat (i8 -1)
   %b = and <vscale x 4 x i8> %a, %y
   ret <vscale x 4 x i8> %b
 }
@@ -197,9 +187,7 @@ define <vscale x 4 x i8> @vandn_vv_swapped_nxv4i8(<vscale x 4 x i8> %x, <vscale
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %a = xor <vscale x 4 x i8> %x, %splat
+  %a = xor <vscale x 4 x i8> %x, splat (i8 -1)
   %b = and <vscale x 4 x i8> %y, %a
   ret <vscale x 4 x i8> %b
 }
@@ -257,9 +245,7 @@ define <vscale x 8 x i8> @vandn_vv_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %a = xor <vscale x 8 x i8> %x, %splat
+  %a = xor <vscale x 8 x i8> %x, splat (i8 -1)
   %b = and <vscale x 8 x i8> %a, %y
   ret <vscale x 8 x i8> %b
 }
@@ -277,9 +263,7 @@ define <vscale x 8 x i8> @vandn_vv_swapped_nxv8i8(<vscale x 8 x i8> %x, <vscale
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %a = xor <vscale x 8 x i8> %x, %splat
+  %a = xor <vscale x 8 x i8> %x, splat (i8 -1)
   %b = and <vscale x 8 x i8> %y, %a
   ret <vscale x 8 x i8> %b
 }
@@ -337,9 +321,7 @@ define <vscale x 16 x i8> @vandn_vv_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %a = xor <vscale x 16 x i8> %x, %splat
+  %a = xor <vscale x 16 x i8> %x, splat (i8 -1)
   %b = and <vscale x 16 x i8> %a, %y
   ret <vscale x 16 x i8> %b
 }
@@ -357,9 +339,7 @@ define <vscale x 16 x i8> @vandn_vv_swapped_nxv16i8(<vscale x 16 x i8> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %a = xor <vscale x 16 x i8> %x, %splat
+  %a = xor <vscale x 16 x i8> %x, splat (i8 -1)
   %b = and <vscale x 16 x i8> %y, %a
   ret <vscale x 16 x i8> %b
 }
@@ -417,9 +397,7 @@ define <vscale x 32 x i8> @vandn_vv_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %a = xor <vscale x 32 x i8> %x, %splat
+  %a = xor <vscale x 32 x i8> %x, splat (i8 -1)
   %b = and <vscale x 32 x i8> %a, %y
   ret <vscale x 32 x i8> %b
 }
@@ -437,9 +415,7 @@ define <vscale x 32 x i8> @vandn_vv_swapped_nxv32i8(<vscale x 32 x i8> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %a = xor <vscale x 32 x i8> %x, %splat
+  %a = xor <vscale x 32 x i8> %x, splat (i8 -1)
   %b = and <vscale x 32 x i8> %y, %a
   ret <vscale x 32 x i8> %b
 }
@@ -497,9 +473,7 @@ define <vscale x 64 x i8> @vandn_vv_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %a = xor <vscale x 64 x i8> %x, %splat
+  %a = xor <vscale x 64 x i8> %x, splat (i8 -1)
   %b = and <vscale x 64 x i8> %a, %y
   ret <vscale x 64 x i8> %b
 }
@@ -517,9 +491,7 @@ define <vscale x 64 x i8> @vandn_vv_swapped_nxv64i8(<vscale x 64 x i8> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %a = xor <vscale x 64 x i8> %x, %splat
+  %a = xor <vscale x 64 x i8> %x, splat (i8 -1)
   %b = and <vscale x 64 x i8> %y, %a
   ret <vscale x 64 x i8> %b
 }
@@ -577,9 +549,7 @@ define <vscale x 1 x i16> @vandn_vv_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %a = xor <vscale x 1 x i16> %x, %splat
+  %a = xor <vscale x 1 x i16> %x, splat (i16 -1)
   %b = and <vscale x 1 x i16> %a, %y
   ret <vscale x 1 x i16> %b
 }
@@ -597,9 +567,7 @@ define <vscale x 1 x i16> @vandn_vv_swapped_nxv1i16(<vscale x 1 x i16> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %a = xor <vscale x 1 x i16> %x, %splat
+  %a = xor <vscale x 1 x i16> %x, splat (i16 -1)
   %b = and <vscale x 1 x i16> %y, %a
   ret <vscale x 1 x i16> %b
 }
@@ -657,9 +625,7 @@ define <vscale x 2 x i16> @vandn_vv_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %a = xor <vscale x 2 x i16> %x, %splat
+  %a = xor <vscale x 2 x i16> %x, splat (i16 -1)
   %b = and <vscale x 2 x i16> %a, %y
   ret <vscale x 2 x i16> %b
 }
@@ -677,9 +643,7 @@ define <vscale x 2 x i16> @vandn_vv_swapped_nxv2i16(<vscale x 2 x i16> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %a = xor <vscale x 2 x i16> %x, %splat
+  %a = xor <vscale x 2 x i16> %x, splat (i16 -1)
   %b = and <vscale x 2 x i16> %y, %a
   ret <vscale x 2 x i16> %b
 }
@@ -737,9 +701,7 @@ define <vscale x 4 x i16> @vandn_vv_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %a = xor <vscale x 4 x i16> %x, %splat
+  %a = xor <vscale x 4 x i16> %x, splat (i16 -1)
   %b = and <vscale x 4 x i16> %a, %y
   ret <vscale x 4 x i16> %b
 }
@@ -757,9 +719,7 @@ define <vscale x 4 x i16> @vandn_vv_swapped_nxv4i16(<vscale x 4 x i16> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %a = xor <vscale x 4 x i16> %x, %splat
+  %a = xor <vscale x 4 x i16> %x, splat (i16 -1)
   %b = and <vscale x 4 x i16> %y, %a
   ret <vscale x 4 x i16> %b
 }
@@ -817,9 +777,7 @@ define <vscale x 8 x i16> @vandn_vv_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %a = xor <vscale x 8 x i16> %x, %splat
+  %a = xor <vscale x 8 x i16> %x, splat (i16 -1)
   %b = and <vscale x 8 x i16> %a, %y
   ret <vscale x 8 x i16> %b
 }
@@ -837,9 +795,7 @@ define <vscale x 8 x i16> @vandn_vv_swapped_nxv8i16(<vscale x 8 x i16> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %a = xor <vscale x 8 x i16> %x, %splat
+  %a = xor <vscale x 8 x i16> %x, splat (i16 -1)
   %b = and <vscale x 8 x i16> %y, %a
   ret <vscale x 8 x i16> %b
 }
@@ -897,9 +853,7 @@ define <vscale x 16 x i16> @vandn_vv_nxv16i16(<vscale x 16 x i16> %x, <vscale x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %a = xor <vscale x 16 x i16> %x, %splat
+  %a = xor <vscale x 16 x i16> %x, splat (i16 -1)
   %b = and <vscale x 16 x i16> %a, %y
   ret <vscale x 16 x i16> %b
 }
@@ -917,9 +871,7 @@ define <vscale x 16 x i16> @vandn_vv_swapped_nxv16i16(<vscale x 16 x i16> %x, <v
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %a = xor <vscale x 16 x i16> %x, %splat
+  %a = xor <vscale x 16 x i16> %x, splat (i16 -1)
   %b = and <vscale x 16 x i16> %y, %a
   ret <vscale x 16 x i16> %b
 }
@@ -977,9 +929,7 @@ define <vscale x 32 x i16> @vandn_vv_nxv32i16(<vscale x 32 x i16> %x, <vscale x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %a = xor <vscale x 32 x i16> %x, %splat
+  %a = xor <vscale x 32 x i16> %x, splat (i16 -1)
   %b = and <vscale x 32 x i16> %a, %y
   ret <vscale x 32 x i16> %b
 }
@@ -997,9 +947,7 @@ define <vscale x 32 x i16> @vandn_vv_swapped_nxv32i16(<vscale x 32 x i16> %x, <v
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %a = xor <vscale x 32 x i16> %x, %splat
+  %a = xor <vscale x 32 x i16> %x, splat (i16 -1)
   %b = and <vscale x 32 x i16> %y, %a
   ret <vscale x 32 x i16> %b
 }
@@ -1057,9 +1005,7 @@ define <vscale x 1 x i32> @vandn_vv_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %a = xor <vscale x 1 x i32> %x, %splat
+  %a = xor <vscale x 1 x i32> %x, splat (i32 -1)
   %b = and <vscale x 1 x i32> %a, %y
   ret <vscale x 1 x i32> %b
 }
@@ -1077,9 +1023,7 @@ define <vscale x 1 x i32> @vandn_vv_swapped_nxv1i32(<vscale x 1 x i32> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %a = xor <vscale x 1 x i32> %x, %splat
+  %a = xor <vscale x 1 x i32> %x, splat (i32 -1)
   %b = and <vscale x 1 x i32> %y, %a
   ret <vscale x 1 x i32> %b
 }
@@ -1137,9 +1081,7 @@ define <vscale x 2 x i32> @vandn_vv_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %a = xor <vscale x 2 x i32> %x, %splat
+  %a = xor <vscale x 2 x i32> %x, splat (i32 -1)
   %b = and <vscale x 2 x i32> %a, %y
   ret <vscale x 2 x i32> %b
 }
@@ -1157,9 +1099,7 @@ define <vscale x 2 x i32> @vandn_vv_swapped_nxv2i32(<vscale x 2 x i32> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %a = xor <vscale x 2 x i32> %x, %splat
+  %a = xor <vscale x 2 x i32> %x, splat (i32 -1)
   %b = and <vscale x 2 x i32> %y, %a
   ret <vscale x 2 x i32> %b
 }
@@ -1217,9 +1157,7 @@ define <vscale x 4 x i32> @vandn_vv_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %a = xor <vscale x 4 x i32> %x, %splat
+  %a = xor <vscale x 4 x i32> %x, splat (i32 -1)
   %b = and <vscale x 4 x i32> %a, %y
   ret <vscale x 4 x i32> %b
 }
@@ -1237,9 +1175,7 @@ define <vscale x 4 x i32> @vandn_vv_swapped_nxv4i32(<vscale x 4 x i32> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %a = xor <vscale x 4 x i32> %x, %splat
+  %a = xor <vscale x 4 x i32> %x, splat (i32 -1)
   %b = and <vscale x 4 x i32> %y, %a
   ret <vscale x 4 x i32> %b
 }
@@ -1297,9 +1233,7 @@ define <vscale x 8 x i32> @vandn_vv_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %a = xor <vscale x 8 x i32> %x, %splat
+  %a = xor <vscale x 8 x i32> %x, splat (i32 -1)
   %b = and <vscale x 8 x i32> %a, %y
   ret <vscale x 8 x i32> %b
 }
@@ -1317,9 +1251,7 @@ define <vscale x 8 x i32> @vandn_vv_swapped_nxv8i32(<vscale x 8 x i32> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %a = xor <vscale x 8 x i32> %x, %splat
+  %a = xor <vscale x 8 x i32> %x, splat (i32 -1)
   %b = and <vscale x 8 x i32> %y, %a
   ret <vscale x 8 x i32> %b
 }
@@ -1377,9 +1309,7 @@ define <vscale x 16 x i32> @vandn_vv_nxv16i32(<vscale x 16 x i32> %x, <vscale x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %a = xor <vscale x 16 x i32> %x, %splat
+  %a = xor <vscale x 16 x i32> %x, splat (i32 -1)
   %b = and <vscale x 16 x i32> %a, %y
   ret <vscale x 16 x i32> %b
 }
@@ -1397,9 +1327,7 @@ define <vscale x 16 x i32> @vandn_vv_swapped_nxv16i32(<vscale x 16 x i32> %x, <v
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %a = xor <vscale x 16 x i32> %x, %splat
+  %a = xor <vscale x 16 x i32> %x, splat (i32 -1)
   %b = and <vscale x 16 x i32> %y, %a
   ret <vscale x 16 x i32> %b
 }
@@ -1457,9 +1385,7 @@ define <vscale x 1 x i64> @vandn_vv_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %a = xor <vscale x 1 x i64> %x, %splat
+  %a = xor <vscale x 1 x i64> %x, splat (i64 -1)
   %b = and <vscale x 1 x i64> %a, %y
   ret <vscale x 1 x i64> %b
 }
@@ -1477,9 +1403,7 @@ define <vscale x 1 x i64> @vandn_vv_swapped_nxv1i64(<vscale x 1 x i64> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %a = xor <vscale x 1 x i64> %x, %splat
+  %a = xor <vscale x 1 x i64> %x, splat (i64 -1)
   %b = and <vscale x 1 x i64> %y, %a
   ret <vscale x 1 x i64> %b
 }
@@ -1597,9 +1521,7 @@ define <vscale x 2 x i64> @vandn_vv_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %a = xor <vscale x 2 x i64> %x, %splat
+  %a = xor <vscale x 2 x i64> %x, splat (i64 -1)
   %b = and <vscale x 2 x i64> %a, %y
   ret <vscale x 2 x i64> %b
 }
@@ -1617,9 +1539,7 @@ define <vscale x 2 x i64> @vandn_vv_swapped_nxv2i64(<vscale x 2 x i64> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %a = xor <vscale x 2 x i64> %x, %splat
+  %a = xor <vscale x 2 x i64> %x, splat (i64 -1)
   %b = and <vscale x 2 x i64> %y, %a
   ret <vscale x 2 x i64> %b
 }
@@ -1737,9 +1657,7 @@ define <vscale x 4 x i64> @vandn_vv_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %a = xor <vscale x 4 x i64> %x, %splat
+  %a = xor <vscale x 4 x i64> %x, splat (i64 -1)
   %b = and <vscale x 4 x i64> %a, %y
   ret <vscale x 4 x i64> %b
 }
@@ -1757,9 +1675,7 @@ define <vscale x 4 x i64> @vandn_vv_swapped_nxv4i64(<vscale x 4 x i64> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %a = xor <vscale x 4 x i64> %x, %splat
+  %a = xor <vscale x 4 x i64> %x, splat (i64 -1)
   %b = and <vscale x 4 x i64> %y, %a
   ret <vscale x 4 x i64> %b
 }
@@ -1877,9 +1793,7 @@ define <vscale x 8 x i64> @vandn_vv_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %a = xor <vscale x 8 x i64> %x, %splat
+  %a = xor <vscale x 8 x i64> %x, splat (i64 -1)
   %b = and <vscale x 8 x i64> %a, %y
   ret <vscale x 8 x i64> %b
 }
@@ -1897,9 +1811,7 @@ define <vscale x 8 x i64> @vandn_vv_swapped_nxv8i64(<vscale x 8 x i64> %x, <vsca
 ; CHECK-ZVKB-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8
 ; CHECK-ZVKB-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %a = xor <vscale x 8 x i64> %x, %splat
+  %a = xor <vscale x 8 x i64> %x, splat (i64 -1)
   %b = and <vscale x 8 x i64> %y, %a
   ret <vscale x 8 x i64> %b
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
index 4de71b6ce06fbb..939a45e15c1037 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -48,9 +48,7 @@ define <vscale x 1 x half> @vfsgnj_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -94,9 +92,7 @@ define <vscale x 2 x half> @vfsgnj_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -140,9 +136,7 @@ define <vscale x 4 x half> @vfsgnj_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -186,9 +180,7 @@ define <vscale x 8 x half> @vfsgnj_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -232,9 +224,7 @@ define <vscale x 16 x half> @vfsgnj_vv_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -311,8 +301,6 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -320,6 +308,8 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -348,9 +338,7 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -372,9 +360,7 @@ define <vscale x 1 x float> @vfsgnj_vv_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -396,9 +382,7 @@ define <vscale x 2 x float> @vfsgnj_vv_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -420,9 +404,7 @@ define <vscale x 4 x float> @vfsgnj_vv_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -444,9 +426,7 @@ define <vscale x 8 x float> @vfsgnj_vv_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -468,9 +448,7 @@ define <vscale x 16 x float> @vfsgnj_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -492,9 +470,7 @@ define <vscale x 1 x double> @vfsgnj_vv_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -516,9 +492,7 @@ define <vscale x 2 x double> @vfsgnj_vv_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -540,9 +514,7 @@ define <vscale x 4 x double> @vfsgnj_vv_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -564,8 +536,6 @@ define <vscale x 8 x double> @vfsgnj_vv_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
index 0028ac88cc4fed..ef9b2104b2d2d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
@@ -37,9 +37,7 @@ define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsrl.vi v9, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sdiv <vscale x 1 x i8> %va, %splat
+  %vc = sdiv <vscale x 1 x i8> %va, splat (i8 -7)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -48,9 +46,7 @@ define <vscale x 1 x i8> @vdiv_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vdiv_vi_nxv1i8_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sdiv <vscale x 1 x i8> %va, %splat
+  %vc = sdiv <vscale x 1 x i8> %va, splat (i8 1)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -61,9 +57,7 @@ define <vscale x 1 x i8> @vdiv_iv_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 0, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sdiv <vscale x 1 x i8> %splat, %va
+  %vc = sdiv <vscale x 1 x i8> splat (i8 0), %va
   ret <vscale x 1 x i8> %vc
 }
 
@@ -100,9 +94,7 @@ define <vscale x 2 x i8> @vdiv_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsrl.vi v9, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sdiv <vscale x 2 x i8> %va, %splat
+  %vc = sdiv <vscale x 2 x i8> %va, splat (i8 -7)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -139,9 +131,7 @@ define <vscale x 4 x i8> @vdiv_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsrl.vi v9, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sdiv <vscale x 4 x i8> %va, %splat
+  %vc = sdiv <vscale x 4 x i8> %va, splat (i8 -7)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -178,9 +168,7 @@ define <vscale x 8 x i8> @vdiv_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsrl.vi v9, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sdiv <vscale x 8 x i8> %va, %splat
+  %vc = sdiv <vscale x 8 x i8> %va, splat (i8 -7)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -217,9 +205,7 @@ define <vscale x 16 x i8> @vdiv_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsrl.vi v10, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sdiv <vscale x 16 x i8> %va, %splat
+  %vc = sdiv <vscale x 16 x i8> %va, splat (i8 -7)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -256,9 +242,7 @@ define <vscale x 32 x i8> @vdiv_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsrl.vi v12, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = sdiv <vscale x 32 x i8> %va, %splat
+  %vc = sdiv <vscale x 32 x i8> %va, splat (i8 -7)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -295,9 +279,7 @@ define <vscale x 64 x i8> @vdiv_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsrl.vi v16, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = sdiv <vscale x 64 x i8> %va, %splat
+  %vc = sdiv <vscale x 64 x i8> %va, splat (i8 -7)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -334,9 +316,7 @@ define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsrl.vi v9, v8, 15
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sdiv <vscale x 1 x i16> %va, %splat
+  %vc = sdiv <vscale x 1 x i16> %va, splat (i16 -7)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -373,9 +353,7 @@ define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsrl.vi v9, v8, 15
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sdiv <vscale x 2 x i16> %va, %splat
+  %vc = sdiv <vscale x 2 x i16> %va, splat (i16 -7)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -412,9 +390,7 @@ define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsrl.vi v9, v8, 15
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sdiv <vscale x 4 x i16> %va, %splat
+  %vc = sdiv <vscale x 4 x i16> %va, splat (i16 -7)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -451,9 +427,7 @@ define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsrl.vi v10, v8, 15
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sdiv <vscale x 8 x i16> %va, %splat
+  %vc = sdiv <vscale x 8 x i16> %va, splat (i16 -7)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -490,9 +464,7 @@ define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsrl.vi v12, v8, 15
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sdiv <vscale x 16 x i16> %va, %splat
+  %vc = sdiv <vscale x 16 x i16> %va, splat (i16 -7)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -529,9 +501,7 @@ define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = sdiv <vscale x 32 x i16> %va, %splat
+  %vc = sdiv <vscale x 32 x i16> %va, splat (i16 -7)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -581,9 +551,7 @@ define <vscale x 1 x i32> @vdiv_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV64-NEXT:    vsrl.vi v9, v8, 31
 ; RV64-NEXT:    vadd.vv v8, v8, v9
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sdiv <vscale x 1 x i32> %va, %splat
+  %vc = sdiv <vscale x 1 x i32> %va, splat (i32 -7)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -633,9 +601,7 @@ define <vscale x 2 x i32> @vdiv_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV64-NEXT:    vsrl.vi v9, v8, 31
 ; RV64-NEXT:    vadd.vv v8, v8, v9
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sdiv <vscale x 2 x i32> %va, %splat
+  %vc = sdiv <vscale x 2 x i32> %va, splat (i32 -7)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -685,9 +651,7 @@ define <vscale x 4 x i32> @vdiv_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV64-NEXT:    vsrl.vi v10, v8, 31
 ; RV64-NEXT:    vadd.vv v8, v8, v10
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sdiv <vscale x 4 x i32> %va, %splat
+  %vc = sdiv <vscale x 4 x i32> %va, splat (i32 -7)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -737,9 +701,7 @@ define <vscale x 8 x i32> @vdiv_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV64-NEXT:    vsrl.vi v12, v8, 31
 ; RV64-NEXT:    vadd.vv v8, v8, v12
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sdiv <vscale x 8 x i32> %va, %splat
+  %vc = sdiv <vscale x 8 x i32> %va, splat (i32 -7)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -789,9 +751,7 @@ define <vscale x 16 x i32> @vdiv_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; RV64-NEXT:    vsrl.vi v16, v8, 31
 ; RV64-NEXT:    vadd.vv v8, v8, v16
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sdiv <vscale x 16 x i32> %va, %splat
+  %vc = sdiv <vscale x 16 x i32> %va, splat (i32 -7)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -870,9 +830,7 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV64-V-NEXT:    vsra.vi v8, v8, 1
 ; RV64-V-NEXT:    vadd.vv v8, v8, v9
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sdiv <vscale x 1 x i64> %va, %splat
+  %vc = sdiv <vscale x 1 x i64> %va, splat (i64 -7)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -951,9 +909,7 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV64-V-NEXT:    vsra.vi v8, v8, 1
 ; RV64-V-NEXT:    vadd.vv v8, v8, v10
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sdiv <vscale x 2 x i64> %va, %splat
+  %vc = sdiv <vscale x 2 x i64> %va, splat (i64 -7)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1032,9 +988,7 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV64-V-NEXT:    vsra.vi v8, v8, 1
 ; RV64-V-NEXT:    vadd.vv v8, v8, v12
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sdiv <vscale x 4 x i64> %va, %splat
+  %vc = sdiv <vscale x 4 x i64> %va, splat (i64 -7)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1113,9 +1067,7 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV64-V-NEXT:    vsra.vi v8, v8, 1
 ; RV64-V-NEXT:    vadd.vv v8, v8, v16
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sdiv <vscale x 8 x i64> %va, %splat
+  %vc = sdiv <vscale x 8 x i64> %va, splat (i64 -7)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1127,9 +1079,7 @@ define <vscale x 8 x i32> @vdiv_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 1)
   %vc = sdiv <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -1142,11 +1092,9 @@ define <vscale x 8 x i32> @vdiv_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 sign
 ; CHECK-NEXT:    vmerge.vxm v12, v12, a0, v0
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
   %head2 = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> splat (i32 1)
   %vc = sdiv <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -1159,11 +1107,7 @@ define <vscale x 8 x i32> @vdiv_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vmerge.vim v12, v12, 7, v0
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head2 = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> splat (i32 1)
   %vc = sdiv <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
index fd951d5cfbffdf..26089706cf99ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
@@ -39,9 +39,7 @@ define <vscale x 1 x i8> @vdiv_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sdiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sdiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -65,9 +63,7 @@ define <vscale x 1 x i8> @vdiv_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sdiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sdiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -89,9 +85,7 @@ define <vscale x 2 x i8> @vdiv_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sdiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sdiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -115,9 +109,7 @@ define <vscale x 2 x i8> @vdiv_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sdiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sdiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -151,9 +143,7 @@ define <vscale x 4 x i8> @vdiv_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sdiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sdiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -177,9 +167,7 @@ define <vscale x 4 x i8> @vdiv_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sdiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sdiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -201,9 +189,7 @@ define <vscale x 8 x i8> @vdiv_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sdiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sdiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -227,9 +213,7 @@ define <vscale x 8 x i8> @vdiv_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sdiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sdiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -251,9 +235,7 @@ define <vscale x 16 x i8> @vdiv_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sdiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sdiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -277,9 +259,7 @@ define <vscale x 16 x i8> @vdiv_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sdiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sdiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -301,9 +281,7 @@ define <vscale x 32 x i8> @vdiv_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sdiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sdiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -327,9 +305,7 @@ define <vscale x 32 x i8> @vdiv_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sdiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sdiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -351,9 +327,7 @@ define <vscale x 64 x i8> @vdiv_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sdiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sdiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -377,9 +351,7 @@ define <vscale x 64 x i8> @vdiv_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sdiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sdiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -401,9 +373,7 @@ define <vscale x 1 x i16> @vdiv_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sdiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sdiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -427,9 +397,7 @@ define <vscale x 1 x i16> @vdiv_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sdiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sdiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -451,9 +419,7 @@ define <vscale x 2 x i16> @vdiv_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sdiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sdiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -477,9 +443,7 @@ define <vscale x 2 x i16> @vdiv_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sdiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sdiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -501,9 +465,7 @@ define <vscale x 4 x i16> @vdiv_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sdiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sdiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -527,9 +489,7 @@ define <vscale x 4 x i16> @vdiv_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sdiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sdiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -551,9 +511,7 @@ define <vscale x 8 x i16> @vdiv_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sdiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sdiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -577,9 +535,7 @@ define <vscale x 8 x i16> @vdiv_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sdiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sdiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -601,9 +557,7 @@ define <vscale x 16 x i16> @vdiv_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sdiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sdiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -627,9 +581,7 @@ define <vscale x 16 x i16> @vdiv_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sdiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sdiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -651,9 +603,7 @@ define <vscale x 32 x i16> @vdiv_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sdiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sdiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -677,9 +627,7 @@ define <vscale x 32 x i16> @vdiv_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sdiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sdiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -701,9 +649,7 @@ define <vscale x 1 x i32> @vdiv_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sdiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sdiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -727,9 +673,7 @@ define <vscale x 1 x i32> @vdiv_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sdiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sdiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -751,9 +695,7 @@ define <vscale x 2 x i32> @vdiv_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sdiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sdiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -777,9 +719,7 @@ define <vscale x 2 x i32> @vdiv_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sdiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sdiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -801,9 +741,7 @@ define <vscale x 4 x i32> @vdiv_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sdiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sdiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -827,9 +765,7 @@ define <vscale x 4 x i32> @vdiv_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sdiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sdiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -851,9 +787,7 @@ define <vscale x 8 x i32> @vdiv_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sdiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sdiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -877,9 +811,7 @@ define <vscale x 8 x i32> @vdiv_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sdiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sdiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -901,9 +833,7 @@ define <vscale x 16 x i32> @vdiv_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sdiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sdiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -927,9 +857,7 @@ define <vscale x 16 x i32> @vdiv_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sdiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sdiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -951,9 +879,7 @@ define <vscale x 1 x i64> @vdiv_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sdiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sdiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1005,9 +931,7 @@ define <vscale x 1 x i64> @vdiv_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sdiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sdiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1029,9 +953,7 @@ define <vscale x 2 x i64> @vdiv_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sdiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sdiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1083,9 +1005,7 @@ define <vscale x 2 x i64> @vdiv_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sdiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sdiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1107,9 +1027,7 @@ define <vscale x 4 x i64> @vdiv_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sdiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sdiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1161,9 +1079,7 @@ define <vscale x 4 x i64> @vdiv_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sdiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sdiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1185,9 +1101,7 @@ define <vscale x 8 x i64> @vdiv_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sdiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sdiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1239,8 +1153,6 @@ define <vscale x 8 x i64> @vdiv_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sdiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sdiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
index c505cb3d1bbd4a..4f2fb937ca73f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
@@ -34,9 +34,7 @@ define <vscale x 1 x i8> @vdivu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = udiv <vscale x 1 x i8> %va, %splat
+  %vc = udiv <vscale x 1 x i8> %va, splat (i8 -7)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -45,9 +43,7 @@ define <vscale x 1 x i8> @vdivu_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vdivu_vi_nxv1i8_1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = udiv <vscale x 1 x i8> %va, %splat
+  %vc = udiv <vscale x 1 x i8> %va, splat (i8 1)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -58,9 +54,7 @@ define <vscale x 1 x i8> @vdivu_iv_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 0, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = udiv <vscale x 1 x i8> %splat, %va
+  %vc = udiv <vscale x 1 x i8> splat (i8 0), %va
   ret <vscale x 1 x i8> %vc
 }
 
@@ -94,9 +88,7 @@ define <vscale x 2 x i8> @vdivu_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = udiv <vscale x 2 x i8> %va, %splat
+  %vc = udiv <vscale x 2 x i8> %va, splat (i8 -7)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -130,9 +122,7 @@ define <vscale x 4 x i8> @vdivu_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = udiv <vscale x 4 x i8> %va, %splat
+  %vc = udiv <vscale x 4 x i8> %va, splat (i8 -7)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -166,9 +156,7 @@ define <vscale x 8 x i8> @vdivu_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = udiv <vscale x 8 x i8> %va, %splat
+  %vc = udiv <vscale x 8 x i8> %va, splat (i8 -7)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -202,9 +190,7 @@ define <vscale x 16 x i8> @vdivu_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = udiv <vscale x 16 x i8> %va, %splat
+  %vc = udiv <vscale x 16 x i8> %va, splat (i8 -7)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -238,9 +224,7 @@ define <vscale x 32 x i8> @vdivu_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = udiv <vscale x 32 x i8> %va, %splat
+  %vc = udiv <vscale x 32 x i8> %va, splat (i8 -7)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -274,9 +258,7 @@ define <vscale x 64 x i8> @vdivu_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = udiv <vscale x 64 x i8> %va, %splat
+  %vc = udiv <vscale x 64 x i8> %va, splat (i8 -7)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -311,9 +293,7 @@ define <vscale x 1 x i16> @vdivu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 13
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = udiv <vscale x 1 x i16> %va, %splat
+  %vc = udiv <vscale x 1 x i16> %va, splat (i16 -7)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -348,9 +328,7 @@ define <vscale x 2 x i16> @vdivu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 13
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = udiv <vscale x 2 x i16> %va, %splat
+  %vc = udiv <vscale x 2 x i16> %va, splat (i16 -7)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -385,9 +363,7 @@ define <vscale x 4 x i16> @vdivu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 13
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = udiv <vscale x 4 x i16> %va, %splat
+  %vc = udiv <vscale x 4 x i16> %va, splat (i16 -7)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -422,9 +398,7 @@ define <vscale x 8 x i16> @vdivu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 13
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = udiv <vscale x 8 x i16> %va, %splat
+  %vc = udiv <vscale x 8 x i16> %va, splat (i16 -7)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -459,9 +433,7 @@ define <vscale x 16 x i16> @vdivu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 13
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = udiv <vscale x 16 x i16> %va, %splat
+  %vc = udiv <vscale x 16 x i16> %va, splat (i16 -7)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -496,9 +468,7 @@ define <vscale x 32 x i16> @vdivu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 13
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = udiv <vscale x 32 x i16> %va, %splat
+  %vc = udiv <vscale x 32 x i16> %va, splat (i16 -7)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -533,9 +503,7 @@ define <vscale x 1 x i32> @vdivu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 29
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = udiv <vscale x 1 x i32> %va, %splat
+  %vc = udiv <vscale x 1 x i32> %va, splat (i32 -7)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -570,9 +538,7 @@ define <vscale x 2 x i32> @vdivu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 29
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = udiv <vscale x 2 x i32> %va, %splat
+  %vc = udiv <vscale x 2 x i32> %va, splat (i32 -7)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -607,9 +573,7 @@ define <vscale x 4 x i32> @vdivu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 29
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = udiv <vscale x 4 x i32> %va, %splat
+  %vc = udiv <vscale x 4 x i32> %va, splat (i32 -7)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -644,9 +608,7 @@ define <vscale x 8 x i32> @vdivu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 29
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = udiv <vscale x 8 x i32> %va, %splat
+  %vc = udiv <vscale x 8 x i32> %va, splat (i32 -7)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -681,9 +643,7 @@ define <vscale x 16 x i32> @vdivu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 29
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = udiv <vscale x 16 x i32> %va, %splat
+  %vc = udiv <vscale x 16 x i32> %va, splat (i32 -7)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -757,9 +717,7 @@ define <vscale x 1 x i64> @vdivu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV64-V-NEXT:    li a0, 61
 ; RV64-V-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = udiv <vscale x 1 x i64> %va, %splat
+  %vc = udiv <vscale x 1 x i64> %va, splat (i64 -7)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -769,9 +727,7 @@ define <vscale x 1 x i64> @vdivu_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = udiv <vscale x 1 x i64> %va, %splat
+  %vc = udiv <vscale x 1 x i64> %va, splat (i64 2)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -783,9 +739,7 @@ define <vscale x 1 x i64> @vdivu_vi_nxv1i64_2(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vadd.vi v9, v9, 4
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = shl <vscale x 1 x i64> %splat, %vb
+  %vc = shl <vscale x 1 x i64> splat (i64 16), %vb
   %vd = udiv <vscale x 1 x i64> %va, %vc
   ret <vscale x 1 x i64> %vd
 }
@@ -860,9 +814,7 @@ define <vscale x 2 x i64> @vdivu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV64-V-NEXT:    li a0, 61
 ; RV64-V-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = udiv <vscale x 2 x i64> %va, %splat
+  %vc = udiv <vscale x 2 x i64> %va, splat (i64 -7)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -872,9 +824,7 @@ define <vscale x 2 x i64> @vdivu_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = udiv <vscale x 2 x i64> %va, %splat
+  %vc = udiv <vscale x 2 x i64> %va, splat (i64 2)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -886,9 +836,7 @@ define <vscale x 2 x i64> @vdivu_vi_nxv2i64_2(<vscale x 2 x i64> %va, <vscale x
 ; CHECK-NEXT:    vadd.vi v10, v10, 4
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = shl <vscale x 2 x i64> %splat, %vb
+  %vc = shl <vscale x 2 x i64> splat (i64 16), %vb
   %vd = udiv <vscale x 2 x i64> %va, %vc
   ret <vscale x 2 x i64> %vd
 }
@@ -963,9 +911,7 @@ define <vscale x 4 x i64> @vdivu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV64-V-NEXT:    li a0, 61
 ; RV64-V-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = udiv <vscale x 4 x i64> %va, %splat
+  %vc = udiv <vscale x 4 x i64> %va, splat (i64 -7)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -975,9 +921,7 @@ define <vscale x 4 x i64> @vdivu_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = udiv <vscale x 4 x i64> %va, %splat
+  %vc = udiv <vscale x 4 x i64> %va, splat (i64 2)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -989,9 +933,7 @@ define <vscale x 4 x i64> @vdivu_vi_nxv4i64_2(<vscale x 4 x i64> %va, <vscale x
 ; CHECK-NEXT:    vadd.vi v12, v12, 4
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = shl <vscale x 4 x i64> %splat, %vb
+  %vc = shl <vscale x 4 x i64> splat (i64 16), %vb
   %vd = udiv <vscale x 4 x i64> %va, %vc
   ret <vscale x 4 x i64> %vd
 }
@@ -1066,9 +1008,7 @@ define <vscale x 8 x i64> @vdivu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV64-V-NEXT:    li a0, 61
 ; RV64-V-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = udiv <vscale x 8 x i64> %va, %splat
+  %vc = udiv <vscale x 8 x i64> %va, splat (i64 -7)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1078,9 +1018,7 @@ define <vscale x 8 x i64> @vdivu_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = udiv <vscale x 8 x i64> %va, %splat
+  %vc = udiv <vscale x 8 x i64> %va, splat (i64 2)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1092,9 +1030,7 @@ define <vscale x 8 x i64> @vdivu_vi_nxv8i64_2(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vadd.vi v16, v16, 4
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = shl <vscale x 8 x i64> %splat, %vb
+  %vc = shl <vscale x 8 x i64> splat (i64 16), %vb
   %vd = udiv <vscale x 8 x i64> %va, %vc
   ret <vscale x 8 x i64> %vd
 }
@@ -1107,9 +1043,7 @@ define <vscale x 8 x i32> @vdivu_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 1)
   %vc = udiv <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -1122,11 +1056,9 @@ define <vscale x 8 x i32> @vdivu_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 sig
 ; CHECK-NEXT:    vmerge.vxm v12, v12, a0, v0
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
   %head2 = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> splat (i32 1)
   %vc = udiv <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -1143,11 +1075,7 @@ define <vscale x 8 x i32> @vdivu_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vadd.vv v12, v16, v12
 ; CHECK-NEXT:    vsrl.vi v8, v12, 2, v0.t
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head2 = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> splat (i32 1)
   %vc = udiv <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
index d6ebf822fd2668..f41b885a66eaae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
@@ -41,9 +41,7 @@ define <vscale x 1 x i8> @vdivu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -67,9 +65,7 @@ define <vscale x 1 x i8> @vdivu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -91,9 +87,7 @@ define <vscale x 2 x i8> @vdivu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -117,9 +111,7 @@ define <vscale x 2 x i8> @vdivu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -153,9 +145,7 @@ define <vscale x 4 x i8> @vdivu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -179,9 +169,7 @@ define <vscale x 4 x i8> @vdivu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -203,9 +191,7 @@ define <vscale x 8 x i8> @vdivu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -229,9 +215,7 @@ define <vscale x 8 x i8> @vdivu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -253,9 +237,7 @@ define <vscale x 16 x i8> @vdivu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -279,9 +261,7 @@ define <vscale x 16 x i8> @vdivu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -303,9 +283,7 @@ define <vscale x 32 x i8> @vdivu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -329,9 +307,7 @@ define <vscale x 32 x i8> @vdivu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -353,9 +329,7 @@ define <vscale x 64 x i8> @vdivu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -379,9 +353,7 @@ define <vscale x 64 x i8> @vdivu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -403,9 +375,7 @@ define <vscale x 1 x i16> @vdivu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -429,9 +399,7 @@ define <vscale x 1 x i16> @vdivu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -453,9 +421,7 @@ define <vscale x 2 x i16> @vdivu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -479,9 +445,7 @@ define <vscale x 2 x i16> @vdivu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -503,9 +467,7 @@ define <vscale x 4 x i16> @vdivu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -529,9 +491,7 @@ define <vscale x 4 x i16> @vdivu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -553,9 +513,7 @@ define <vscale x 8 x i16> @vdivu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -579,9 +537,7 @@ define <vscale x 8 x i16> @vdivu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -603,9 +559,7 @@ define <vscale x 16 x i16> @vdivu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -629,9 +583,7 @@ define <vscale x 16 x i16> @vdivu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -653,9 +605,7 @@ define <vscale x 32 x i16> @vdivu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -679,9 +629,7 @@ define <vscale x 32 x i16> @vdivu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -703,9 +651,7 @@ define <vscale x 1 x i32> @vdivu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -729,9 +675,7 @@ define <vscale x 1 x i32> @vdivu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -753,9 +697,7 @@ define <vscale x 2 x i32> @vdivu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -779,9 +721,7 @@ define <vscale x 2 x i32> @vdivu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -803,9 +743,7 @@ define <vscale x 4 x i32> @vdivu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -829,9 +767,7 @@ define <vscale x 4 x i32> @vdivu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -853,9 +789,7 @@ define <vscale x 8 x i32> @vdivu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -879,9 +813,7 @@ define <vscale x 8 x i32> @vdivu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -903,9 +835,7 @@ define <vscale x 16 x i32> @vdivu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -929,9 +859,7 @@ define <vscale x 16 x i32> @vdivu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -953,9 +881,7 @@ define <vscale x 1 x i64> @vdivu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1007,9 +933,7 @@ define <vscale x 1 x i64> @vdivu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1031,9 +955,7 @@ define <vscale x 2 x i64> @vdivu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1085,9 +1007,7 @@ define <vscale x 2 x i64> @vdivu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1109,9 +1029,7 @@ define <vscale x 4 x i64> @vdivu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1163,9 +1081,7 @@ define <vscale x 4 x i64> @vdivu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1187,9 +1103,7 @@ define <vscale x 8 x i64> @vdivu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1241,8 +1155,6 @@ define <vscale x 8 x i64> @vdivu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
index 22f0d8bba2e629..df2bc523cd7a83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -46,9 +46,7 @@ define <vscale x 1 x half> @vfabs_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -90,9 +88,7 @@ define <vscale x 2 x half> @vfabs_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -134,9 +130,7 @@ define <vscale x 4 x half> @vfabs_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -178,9 +172,7 @@ define <vscale x 8 x half> @vfabs_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -222,9 +214,7 @@ define <vscale x 16 x half> @vfabs_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -279,8 +269,6 @@ define <vscale x 32 x half> @vfabs_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -288,6 +276,8 @@ define <vscale x 32 x half> @vfabs_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -306,9 +296,7 @@ define <vscale x 32 x half> @vfabs_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -330,9 +318,7 @@ define <vscale x 1 x float> @vfabs_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -354,9 +340,7 @@ define <vscale x 2 x float> @vfabs_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -378,9 +362,7 @@ define <vscale x 4 x float> @vfabs_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -402,9 +384,7 @@ define <vscale x 8 x float> @vfabs_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -426,9 +406,7 @@ define <vscale x 16 x float> @vfabs_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -450,9 +428,7 @@ define <vscale x 1 x double> @vfabs_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -474,9 +450,7 @@ define <vscale x 2 x double> @vfabs_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -498,9 +472,7 @@ define <vscale x 4 x double> @vfabs_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -522,9 +494,7 @@ define <vscale x 7 x double> @vfabs_vv_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -546,9 +516,7 @@ define <vscale x 8 x double> @vfabs_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -598,8 +566,6 @@ define <vscale x 16 x double> @vfabs_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
index 0651438429fdef..4065b69e781ad4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
@@ -562,9 +562,7 @@ define <vscale x 8 x float> @vfadd_vv_mask_nxv8f32(<vscale x 8 x float> %va, <vs
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float 0.0, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> %splat
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> splat (float 0.0)
   %vc = fadd fast <vscale x 8 x float> %va, %vs
   ret <vscale x 8 x float> %vc
 }
@@ -575,11 +573,9 @@ define <vscale x 8 x float> @vfadd_vf_mask_nxv8f32(<vscale x 8 x float> %va, flo
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-  %head0 = insertelement <vscale x 8 x float> poison, float 0.0, i32 0
-  %splat0 = shufflevector <vscale x 8 x float> %head0, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
   %head1 = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat1 = shufflevector <vscale x 8 x float> %head1, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> %splat0
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> splat (float 0.0)
   %vc = fadd fast <vscale x 8 x float> %va, %vs
   ret <vscale x 8 x float> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 4168f5cd507911..c69a7bc5cece47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -48,9 +48,7 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -131,9 +129,7 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -160,9 +156,7 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x half
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -206,9 +200,7 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -262,9 +254,7 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -308,9 +298,7 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -364,9 +352,7 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -410,9 +396,7 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -466,9 +450,7 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -512,9 +494,7 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -568,9 +548,7 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -647,8 +625,6 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -656,6 +632,8 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -684,9 +662,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -752,8 +728,6 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -761,6 +735,8 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -782,9 +758,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -806,9 +780,7 @@ define <vscale x 1 x float> @vfadd_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -832,9 +804,7 @@ define <vscale x 1 x float> @vfadd_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -856,9 +826,7 @@ define <vscale x 2 x float> @vfadd_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -882,9 +850,7 @@ define <vscale x 2 x float> @vfadd_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -906,9 +872,7 @@ define <vscale x 4 x float> @vfadd_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -932,9 +896,7 @@ define <vscale x 4 x float> @vfadd_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -956,9 +918,7 @@ define <vscale x 8 x float> @vfadd_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -982,9 +942,7 @@ define <vscale x 8 x float> @vfadd_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -1006,9 +964,7 @@ define <vscale x 16 x float> @vfadd_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -1032,9 +988,7 @@ define <vscale x 16 x float> @vfadd_vf_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -1056,9 +1010,7 @@ define <vscale x 1 x double> @vfadd_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1082,9 +1034,7 @@ define <vscale x 1 x double> @vfadd_vf_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1106,9 +1056,7 @@ define <vscale x 2 x double> @vfadd_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1132,9 +1080,7 @@ define <vscale x 2 x double> @vfadd_vf_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1156,9 +1102,7 @@ define <vscale x 4 x double> @vfadd_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1182,9 +1126,7 @@ define <vscale x 4 x double> @vfadd_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1218,9 +1160,7 @@ define <vscale x 8 x double> @vfadd_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1244,8 +1184,6 @@ define <vscale x 8 x double> @vfadd_vf_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll
index 12bfc235c0c6d6..be2d576597da23 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll
@@ -28,9 +28,7 @@ define <vscale x 2 x i1> @isnan_nxv2f16_unmasked(<vscale x 2 x half> %x, i32 zer
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> %m, i32 %evl)  ; nan
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> splat (i1 true), i32 %evl)  ; nan
   ret <vscale x 2 x i1> %1
 }
 
@@ -58,9 +56,7 @@ define <vscale x 2 x i1> @isnan_nxv2f32_unmasked(<vscale x 2 x float> %x, i32 ze
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639, <vscale x 2 x i1> %m, i32 %evl)
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i1> %1
 }
 
@@ -88,9 +84,7 @@ define <vscale x 4 x i1> @isnan_nxv4f32_unmasked(<vscale x 4 x float> %x, i32 ze
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3, <vscale x 4 x i1> %m, i32 %evl)  ; nan
+  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3, <vscale x 4 x i1> splat (i1 true), i32 %evl)  ; nan
   ret <vscale x 4 x i1> %1
 }
 
@@ -116,9 +110,7 @@ define <vscale x 8 x i1> @isnan_nxv8f32_unmasked(<vscale x 8 x float> %x, i32 ze
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2, <vscale x 8 x i1> %m, i32 %evl)
+  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i1> %1
 }
 
@@ -144,9 +136,7 @@ define <vscale x 16 x i1> @isnan_nxv16f32_unmasked(<vscale x 16 x float> %x, i32
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-    %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %1 = call <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1, <vscale x 16 x i1> %m, i32 %evl)
+  %1 = call <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i1> %1
 }
 
@@ -174,9 +164,7 @@ define <vscale x 2 x i1> @isnormal_nxv2f64_unmasked(<vscale x 2 x double> %x, i3
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516, <vscale x 2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516, <vscale x 2 x i1> splat (i1 true), i32 %evl) ; 0x204 = "inf"
   ret <vscale x 2 x i1> %1
 }
 
@@ -202,9 +190,7 @@ define <vscale x 4 x i1> @isposinf_nxv4f64_unmasked(<vscale x 4 x double> %x, i3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512, <vscale x 4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512, <vscale x 4 x i1> splat (i1 true), i32 %evl) ; 0x200 = "+inf"
   ret <vscale x 4 x i1> %1
 }
 
@@ -228,9 +214,7 @@ define <vscale x 8 x i1> @isneginf_nxv8f64_unmasked(<vscale x 8 x double> %x, i3
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmseq.vi v0, v8, 1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4, <vscale x 8 x i1> %m, i32 %evl) ; "-inf"
+  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4, <vscale x 8 x i1> splat (i1 true), i32 %evl) ; "-inf"
   ret <vscale x 8 x i1> %1
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll
index b46b6743505be0..7d1d9ec1680c1a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll
@@ -564,9 +564,7 @@ define <vscale x 8 x float> @vfdiv_vv_mask_nxv8f32(<vscale x 8 x float> %va, <vs
 ; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float 0.0, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> %splat
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> splat (float 0.0)
   %vc = fdiv <vscale x 8 x float> %va, %vs
   ret <vscale x 8 x float> %vc
 }
@@ -579,11 +577,9 @@ define <vscale x 8 x float> @vfdiv_vf_mask_nxv8f32(<vscale x 8 x float> %va, flo
 ; CHECK-NEXT:    vfmerge.vfm v12, v12, fa0, v0
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head0 = insertelement <vscale x 8 x float> poison, float 0.0, i32 0
-  %splat0 = shufflevector <vscale x 8 x float> %head0, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
   %head1 = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat1 = shufflevector <vscale x 8 x float> %head1, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> %splat0
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> splat (float 0.0)
   %vc = fdiv <vscale x 8 x float> %va, %vs
   ret <vscale x 8 x float> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index 396e99bc5e4f5d..3ad17e85570a20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -48,9 +48,7 @@ define <vscale x 1 x half> @vfdiv_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -104,9 +102,7 @@ define <vscale x 1 x half> @vfdiv_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -150,9 +146,7 @@ define <vscale x 2 x half> @vfdiv_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -206,9 +200,7 @@ define <vscale x 2 x half> @vfdiv_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -252,9 +244,7 @@ define <vscale x 4 x half> @vfdiv_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -308,9 +298,7 @@ define <vscale x 4 x half> @vfdiv_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -354,9 +342,7 @@ define <vscale x 8 x half> @vfdiv_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -410,9 +396,7 @@ define <vscale x 8 x half> @vfdiv_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -456,9 +440,7 @@ define <vscale x 16 x half> @vfdiv_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -512,9 +494,7 @@ define <vscale x 16 x half> @vfdiv_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -591,8 +571,6 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -600,6 +578,8 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -628,9 +608,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -696,8 +674,6 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -705,6 +681,8 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -726,9 +704,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -750,9 +726,7 @@ define <vscale x 1 x float> @vfdiv_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -776,9 +750,7 @@ define <vscale x 1 x float> @vfdiv_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -800,9 +772,7 @@ define <vscale x 2 x float> @vfdiv_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -826,9 +796,7 @@ define <vscale x 2 x float> @vfdiv_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -850,9 +818,7 @@ define <vscale x 4 x float> @vfdiv_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -876,9 +842,7 @@ define <vscale x 4 x float> @vfdiv_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -900,9 +864,7 @@ define <vscale x 8 x float> @vfdiv_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -926,9 +888,7 @@ define <vscale x 8 x float> @vfdiv_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -950,9 +910,7 @@ define <vscale x 16 x float> @vfdiv_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -976,9 +934,7 @@ define <vscale x 16 x float> @vfdiv_vf_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -1000,9 +956,7 @@ define <vscale x 1 x double> @vfdiv_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1026,9 +980,7 @@ define <vscale x 1 x double> @vfdiv_vf_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1050,9 +1002,7 @@ define <vscale x 2 x double> @vfdiv_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1076,9 +1026,7 @@ define <vscale x 2 x double> @vfdiv_vf_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1100,9 +1048,7 @@ define <vscale x 4 x double> @vfdiv_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1126,9 +1072,7 @@ define <vscale x 4 x double> @vfdiv_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1162,9 +1106,7 @@ define <vscale x 8 x double> @vfdiv_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1188,8 +1130,6 @@ define <vscale x 8 x double> @vfdiv_vf_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll
index 1bc0ed4e7513f3..a0f269b59bfe54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll
@@ -36,12 +36,8 @@ define <vscale x 1 x double> @test2(<vscale x 1 x double> %a, <vscale x 1 x i1>
 ; CHECK-NEXT:    vfadd.vf v9, v9, fa5, v0.t
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head1 = insertelement <vscale x 1 x double> poison, double 2.0, i32 0
-  %c1 = shufflevector <vscale x 1 x double> %elt.head1, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %t = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %c1, <vscale x 1 x i1> %m, i32 %evl)
-  %elt.head2 = insertelement <vscale x 1 x double> poison, double 4.0, i32 0
-  %c2 = shufflevector <vscale x 1 x double> %elt.head2, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call fast <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %c2, <vscale x 1 x double> %t, <vscale x 1 x i1> %m, i32 %evl)
+  %t = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 2.0), <vscale x 1 x i1> %m, i32 %evl)
+  %v = call fast <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 4.0), <vscale x 1 x double> %t, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -60,11 +56,7 @@ define <vscale x 1 x double> @test3(<vscale x 1 x double> %a, <vscale x 1 x doub
 ; CHECK-NEXT:    vfmadd.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %elt.head1 = insertelement <vscale x 1 x double> poison, double 2.0, i32 0
-  %c1 = shufflevector <vscale x 1 x double> %elt.head1, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %t = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %c1, <vscale x 1 x i1> %m, i32 %evl)
-  %elt.head2 = insertelement <vscale x 1 x double> poison, double 4.0, i32 0
-  %c2 = shufflevector <vscale x 1 x double> %elt.head2, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call fast <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %t, <vscale x 1 x double> %c2, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %t = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 2.0), <vscale x 1 x i1> %m, i32 %evl)
+  %v = call fast <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %t, <vscale x 1 x double> splat (double 4.0), <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index 9ab907bfcca67e..a41c2621161366 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -23,9 +23,7 @@ define <vscale x 1 x half> @vfma_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -61,9 +59,7 @@ define <vscale x 1 x half> @vfma_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -75,9 +71,7 @@ define <vscale x 1 x half> @vfma_vf_nxv1f16_unmasked_commute(<vscale x 1 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -100,9 +94,7 @@ define <vscale x 2 x half> @vfma_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -138,9 +130,7 @@ define <vscale x 2 x half> @vfma_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -152,9 +142,7 @@ define <vscale x 2 x half> @vfma_vf_nxv2f16_unmasked_commute(<vscale x 2 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -177,9 +165,7 @@ define <vscale x 4 x half> @vfma_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -215,9 +201,7 @@ define <vscale x 4 x half> @vfma_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -229,9 +213,7 @@ define <vscale x 4 x half> @vfma_vf_nxv4f16_unmasked_commute(<vscale x 4 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -254,9 +236,7 @@ define <vscale x 8 x half> @vfma_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -292,9 +272,7 @@ define <vscale x 8 x half> @vfma_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -306,9 +284,7 @@ define <vscale x 8 x half> @vfma_vf_nxv8f16_unmasked_commute(<vscale x 8 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -331,9 +307,7 @@ define <vscale x 16 x half> @vfma_vv_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -369,9 +343,7 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -383,9 +355,7 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked_commute(<vscale x 16 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -410,9 +380,7 @@ define <vscale x 32 x half> @vfma_vv_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -448,9 +416,7 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -462,9 +428,7 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked_commute(<vscale x 32 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -487,9 +451,7 @@ define <vscale x 1 x float> @vfma_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -525,9 +487,7 @@ define <vscale x 1 x float> @vfma_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -539,9 +499,7 @@ define <vscale x 1 x float> @vfma_vf_nxv1f32_unmasked_commute(<vscale x 1 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -564,9 +522,7 @@ define <vscale x 2 x float> @vfma_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -602,9 +558,7 @@ define <vscale x 2 x float> @vfma_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -616,9 +570,7 @@ define <vscale x 2 x float> @vfma_vf_nxv2f32_unmasked_commute(<vscale x 2 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -641,9 +593,7 @@ define <vscale x 4 x float> @vfma_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -679,9 +629,7 @@ define <vscale x 4 x float> @vfma_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -693,9 +641,7 @@ define <vscale x 4 x float> @vfma_vf_nxv4f32_unmasked_commute(<vscale x 4 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -718,9 +664,7 @@ define <vscale x 8 x float> @vfma_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -756,9 +700,7 @@ define <vscale x 8 x float> @vfma_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -770,9 +712,7 @@ define <vscale x 8 x float> @vfma_vf_nxv8f32_unmasked_commute(<vscale x 8 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -797,9 +737,7 @@ define <vscale x 16 x float> @vfma_vv_nxv16f32_unmasked(<vscale x 16 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -835,9 +773,7 @@ define <vscale x 16 x float> @vfma_vf_nxv16f32_unmasked(<vscale x 16 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -849,9 +785,7 @@ define <vscale x 16 x float> @vfma_vf_nxv16f32_unmasked_commute(<vscale x 16 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -874,9 +808,7 @@ define <vscale x 1 x double> @vfma_vv_nxv1f64_unmasked(<vscale x 1 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -912,9 +844,7 @@ define <vscale x 1 x double> @vfma_vf_nxv1f64_unmasked(<vscale x 1 x double> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -926,9 +856,7 @@ define <vscale x 1 x double> @vfma_vf_nxv1f64_unmasked_commute(<vscale x 1 x dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -951,9 +879,7 @@ define <vscale x 2 x double> @vfma_vv_nxv2f64_unmasked(<vscale x 2 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -989,9 +915,7 @@ define <vscale x 2 x double> @vfma_vf_nxv2f64_unmasked(<vscale x 2 x double> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1003,9 +927,7 @@ define <vscale x 2 x double> @vfma_vf_nxv2f64_unmasked_commute(<vscale x 2 x dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1028,9 +950,7 @@ define <vscale x 4 x double> @vfma_vv_nxv4f64_unmasked(<vscale x 4 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1066,9 +986,7 @@ define <vscale x 4 x double> @vfma_vf_nxv4f64_unmasked(<vscale x 4 x double> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1080,9 +998,7 @@ define <vscale x 4 x double> @vfma_vf_nxv4f64_unmasked_commute(<vscale x 4 x dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1107,9 +1023,7 @@ define <vscale x 7 x double> @vfma_vv_nxv7f64_unmasked(<vscale x 7 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.fma.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x double> %c, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.fma.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x double> %c, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1134,9 +1048,7 @@ define <vscale x 8 x double> @vfma_vv_nxv8f64_unmasked(<vscale x 8 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1172,9 +1084,7 @@ define <vscale x 8 x double> @vfma_vf_nxv8f64_unmasked(<vscale x 8 x double> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1186,9 +1096,7 @@ define <vscale x 8 x double> @vfma_vf_nxv8f64_unmasked_commute(<vscale x 8 x dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1359,9 +1267,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.fma.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.fma.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
 
@@ -1385,10 +1291,8 @@ define <vscale x 1 x half> @vfmsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1426,10 +1330,8 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1441,10 +1343,8 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16_unmasked_commute(<vscale x 1 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1479,11 +1379,9 @@ define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1493,11 +1391,9 @@ define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_unmasked_commuted(<vscale x 1 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1537,11 +1433,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negva, <vscale x 1 x half> %vb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negva, <vscale x 1 x half> %vb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1553,11 +1447,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %negva, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %negva, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1597,11 +1489,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negvb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negvb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1613,11 +1503,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negvb, <vscale x 1 x half> %va, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negvb, <vscale x 1 x half> %va, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1652,11 +1540,9 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1666,11 +1552,9 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_unmasked_commuted(<vscale x 1 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1708,10 +1592,8 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negva, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negva, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1723,10 +1605,8 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_unmasked_commute(<vscale x 1 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %negva, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %negva, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1764,10 +1644,8 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negvb, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negvb, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1779,10 +1657,8 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negvb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %negvb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1806,10 +1682,8 @@ define <vscale x 2 x half> @vfmsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1847,10 +1721,8 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1862,10 +1734,8 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16_unmasked_commute(<vscale x 2 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1900,11 +1770,9 @@ define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1914,11 +1782,9 @@ define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_unmasked_commuted(<vscale x 2 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1958,11 +1824,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negva, <vscale x 2 x half> %vb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negva, <vscale x 2 x half> %vb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1974,11 +1838,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_unmasked_commute(<vscale x 2 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %negva, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %negva, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2018,11 +1880,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negvb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negvb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2034,11 +1894,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negvb, <vscale x 2 x half> %va, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negvb, <vscale x 2 x half> %va, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2073,11 +1931,9 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2087,11 +1943,9 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_unmasked_commuted(<vscale x 2 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2129,10 +1983,8 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negva, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negva, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2144,10 +1996,8 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_unmasked_commute(<vscale x 2 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %negva, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %negva, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2185,10 +2035,8 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negvb, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negvb, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2200,10 +2048,8 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negvb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %negvb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2227,10 +2073,8 @@ define <vscale x 4 x half> @vfmsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2268,10 +2112,8 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2283,10 +2125,8 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16_unmasked_commute(<vscale x 4 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2321,11 +2161,9 @@ define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2335,11 +2173,9 @@ define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_unmasked_commuted(<vscale x 4 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2379,11 +2215,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negva, <vscale x 4 x half> %vb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negva, <vscale x 4 x half> %vb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2395,11 +2229,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_unmasked_commute(<vscale x 4 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %negva, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %negva, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2439,11 +2271,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negvb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negvb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2455,11 +2285,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negvb, <vscale x 4 x half> %va, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negvb, <vscale x 4 x half> %va, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2494,11 +2322,9 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2508,11 +2334,9 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_unmasked_commuted(<vscale x 4 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2550,10 +2374,8 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negva, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negva, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2565,10 +2387,8 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_unmasked_commute(<vscale x 4 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %negva, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %negva, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2606,10 +2426,8 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negvb, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negvb, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2621,10 +2439,8 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negvb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %negvb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2648,10 +2464,8 @@ define <vscale x 8 x half> @vfmsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2689,10 +2503,8 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2704,10 +2516,8 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16_unmasked_commute(<vscale x 8 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2742,11 +2552,9 @@ define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2756,11 +2564,9 @@ define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_unmasked_commuted(<vscale x 8 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2800,11 +2606,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negva, <vscale x 8 x half> %vb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negva, <vscale x 8 x half> %vb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2816,11 +2620,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_unmasked_commute(<vscale x 8 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %negva, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %negva, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2860,11 +2662,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negvb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negvb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2876,11 +2676,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negvb, <vscale x 8 x half> %va, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negvb, <vscale x 8 x half> %va, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2915,11 +2713,9 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2929,11 +2725,9 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_unmasked_commuted(<vscale x 8 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2971,10 +2765,8 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negva, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negva, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2986,10 +2778,8 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_unmasked_commute(<vscale x 8 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %negva, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %negva, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -3027,10 +2817,8 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negvb, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negvb, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -3042,10 +2830,8 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negvb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %negvb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -3069,10 +2855,8 @@ define <vscale x 16 x half> @vfmsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3110,10 +2894,8 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3125,10 +2907,8 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3163,11 +2943,9 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3177,11 +2955,9 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_unmasked_commuted(<vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3221,11 +2997,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negva, <vscale x 16 x half> %vb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negva, <vscale x 16 x half> %vb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3237,11 +3011,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %negva, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %negva, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3281,11 +3053,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked(<vscale x 16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negvb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negvb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3297,11 +3067,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negvb, <vscale x 16 x half> %va, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negvb, <vscale x 16 x half> %va, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3336,11 +3104,9 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3350,11 +3116,9 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_unmasked_commuted(<vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3392,10 +3156,8 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negva, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negva, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3407,10 +3169,8 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %negva, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %negva, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3448,10 +3208,8 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked(<vscale x 16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negvb, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negvb, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3463,10 +3221,8 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negvb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %negvb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3492,10 +3248,8 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3533,10 +3287,8 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3548,10 +3300,8 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3589,11 +3339,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3604,11 +3352,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3648,11 +3394,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3664,11 +3408,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3708,11 +3450,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3724,11 +3464,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3766,11 +3504,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3781,11 +3517,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3823,10 +3557,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3838,10 +3570,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3879,10 +3609,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked(<vscale x 32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3894,10 +3622,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3921,10 +3647,8 @@ define <vscale x 1 x float> @vfmsub_vv_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -3962,10 +3686,8 @@ define <vscale x 1 x float> @vfmsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -3977,10 +3699,8 @@ define <vscale x 1 x float> @vfmsub_vf_nxv1f32_unmasked_commute(<vscale x 1 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4015,11 +3735,9 @@ define <vscale x 1 x float> @vfnmadd_vv_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4029,11 +3747,9 @@ define <vscale x 1 x float> @vfnmadd_vv_nxv1f32_unmasked_commuted(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negb, <vscale x 1 x float> %va, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negb, <vscale x 1 x float> %va, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4073,11 +3789,9 @@ define <vscale x 1 x float> @vfnmadd_vf_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negva, <vscale x 1 x float> %vb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negva, <vscale x 1 x float> %vb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4089,11 +3803,9 @@ define <vscale x 1 x float> @vfnmadd_vf_nxv1f32_unmasked_commute(<vscale x 1 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %negva, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %negva, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4133,11 +3845,9 @@ define <vscale x 1 x float> @vfnmadd_vf_nxv1f32_neg_splat_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negvb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negvb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4149,11 +3859,9 @@ define <vscale x 1 x float> @vfnmadd_vf_nxv1f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negvb, <vscale x 1 x float> %va, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negvb, <vscale x 1 x float> %va, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4188,11 +3896,9 @@ define <vscale x 1 x float> @vfnmsub_vv_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4202,11 +3908,9 @@ define <vscale x 1 x float> @vfnmsub_vv_nxv1f32_unmasked_commuted(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negb, <vscale x 1 x float> %va, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negb, <vscale x 1 x float> %va, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4244,10 +3948,8 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negva, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negva, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4259,10 +3961,8 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32_unmasked_commute(<vscale x 1 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %negva, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %negva, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4300,10 +4000,8 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32_neg_splat_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negvb, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negvb, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4315,10 +4013,8 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negvb, <vscale x 1 x float> %va, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %negvb, <vscale x 1 x float> %va, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4342,10 +4038,8 @@ define <vscale x 2 x float> @vfmsub_vv_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4383,10 +4077,8 @@ define <vscale x 2 x float> @vfmsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4398,10 +4090,8 @@ define <vscale x 2 x float> @vfmsub_vf_nxv2f32_unmasked_commute(<vscale x 2 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4436,11 +4126,9 @@ define <vscale x 2 x float> @vfnmadd_vv_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4450,11 +4138,9 @@ define <vscale x 2 x float> @vfnmadd_vv_nxv2f32_unmasked_commuted(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negb, <vscale x 2 x float> %va, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negb, <vscale x 2 x float> %va, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4494,11 +4180,9 @@ define <vscale x 2 x float> @vfnmadd_vf_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negva, <vscale x 2 x float> %vb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negva, <vscale x 2 x float> %vb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4510,11 +4194,9 @@ define <vscale x 2 x float> @vfnmadd_vf_nxv2f32_unmasked_commute(<vscale x 2 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %negva, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %negva, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4554,11 +4236,9 @@ define <vscale x 2 x float> @vfnmadd_vf_nxv2f32_neg_splat_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negvb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negvb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4570,11 +4250,9 @@ define <vscale x 2 x float> @vfnmadd_vf_nxv2f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negvb, <vscale x 2 x float> %va, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negvb, <vscale x 2 x float> %va, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4609,11 +4287,9 @@ define <vscale x 2 x float> @vfnmsub_vv_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4623,11 +4299,9 @@ define <vscale x 2 x float> @vfnmsub_vv_nxv2f32_unmasked_commuted(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negb, <vscale x 2 x float> %va, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negb, <vscale x 2 x float> %va, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4665,10 +4339,8 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negva, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negva, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4680,10 +4352,8 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32_unmasked_commute(<vscale x 2 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %negva, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %negva, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4721,10 +4391,8 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32_neg_splat_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negvb, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negvb, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4736,10 +4404,8 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negvb, <vscale x 2 x float> %va, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %negvb, <vscale x 2 x float> %va, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4763,10 +4429,8 @@ define <vscale x 4 x float> @vfmsub_vv_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4804,10 +4468,8 @@ define <vscale x 4 x float> @vfmsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4819,10 +4481,8 @@ define <vscale x 4 x float> @vfmsub_vf_nxv4f32_unmasked_commute(<vscale x 4 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4857,11 +4517,9 @@ define <vscale x 4 x float> @vfnmadd_vv_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4871,11 +4529,9 @@ define <vscale x 4 x float> @vfnmadd_vv_nxv4f32_unmasked_commuted(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negb, <vscale x 4 x float> %va, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negb, <vscale x 4 x float> %va, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4915,11 +4571,9 @@ define <vscale x 4 x float> @vfnmadd_vf_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negva, <vscale x 4 x float> %vb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negva, <vscale x 4 x float> %vb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4931,11 +4585,9 @@ define <vscale x 4 x float> @vfnmadd_vf_nxv4f32_unmasked_commute(<vscale x 4 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %negva, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %negva, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4975,11 +4627,9 @@ define <vscale x 4 x float> @vfnmadd_vf_nxv4f32_neg_splat_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negvb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negvb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4991,11 +4641,9 @@ define <vscale x 4 x float> @vfnmadd_vf_nxv4f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negvb, <vscale x 4 x float> %va, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negvb, <vscale x 4 x float> %va, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5030,11 +4678,9 @@ define <vscale x 4 x float> @vfnmsub_vv_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5044,11 +4690,9 @@ define <vscale x 4 x float> @vfnmsub_vv_nxv4f32_unmasked_commuted(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negb, <vscale x 4 x float> %va, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negb, <vscale x 4 x float> %va, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5086,10 +4730,8 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negva, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negva, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5101,10 +4743,8 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32_unmasked_commute(<vscale x 4 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %negva, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %negva, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5142,10 +4782,8 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32_neg_splat_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negvb, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negvb, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5157,10 +4795,8 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negvb, <vscale x 4 x float> %va, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %negvb, <vscale x 4 x float> %va, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5184,10 +4820,8 @@ define <vscale x 8 x float> @vfmsub_vv_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5225,10 +4859,8 @@ define <vscale x 8 x float> @vfmsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5240,10 +4872,8 @@ define <vscale x 8 x float> @vfmsub_vf_nxv8f32_unmasked_commute(<vscale x 8 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5278,11 +4908,9 @@ define <vscale x 8 x float> @vfnmadd_vv_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5292,11 +4920,9 @@ define <vscale x 8 x float> @vfnmadd_vv_nxv8f32_unmasked_commuted(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negb, <vscale x 8 x float> %va, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negb, <vscale x 8 x float> %va, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5336,11 +4962,9 @@ define <vscale x 8 x float> @vfnmadd_vf_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negva, <vscale x 8 x float> %vb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negva, <vscale x 8 x float> %vb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5352,11 +4976,9 @@ define <vscale x 8 x float> @vfnmadd_vf_nxv8f32_unmasked_commute(<vscale x 8 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %negva, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %negva, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5396,11 +5018,9 @@ define <vscale x 8 x float> @vfnmadd_vf_nxv8f32_neg_splat_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negvb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negvb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5412,11 +5032,9 @@ define <vscale x 8 x float> @vfnmadd_vf_nxv8f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negvb, <vscale x 8 x float> %va, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negvb, <vscale x 8 x float> %va, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5451,11 +5069,9 @@ define <vscale x 8 x float> @vfnmsub_vv_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5465,11 +5081,9 @@ define <vscale x 8 x float> @vfnmsub_vv_nxv8f32_unmasked_commuted(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negb, <vscale x 8 x float> %va, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negb, <vscale x 8 x float> %va, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5507,10 +5121,8 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negva, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negva, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5522,10 +5134,8 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32_unmasked_commute(<vscale x 8 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %negva, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %negva, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5563,10 +5173,8 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32_neg_splat_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negvb, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negvb, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5578,10 +5186,8 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negvb, <vscale x 8 x float> %va, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %negvb, <vscale x 8 x float> %va, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5607,10 +5213,8 @@ define <vscale x 16 x float> @vfmsub_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5648,10 +5252,8 @@ define <vscale x 16 x float> @vfmsub_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5663,10 +5265,8 @@ define <vscale x 16 x float> @vfmsub_vf_nxv16f32_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5704,11 +5304,9 @@ define <vscale x 16 x float> @vfnmadd_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5719,11 +5317,9 @@ define <vscale x 16 x float> @vfnmadd_vv_nxv16f32_unmasked_commuted(<vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negb, <vscale x 16 x float> %va, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negb, <vscale x 16 x float> %va, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5763,11 +5359,9 @@ define <vscale x 16 x float> @vfnmadd_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negva, <vscale x 16 x float> %vb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negva, <vscale x 16 x float> %vb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5779,11 +5373,9 @@ define <vscale x 16 x float> @vfnmadd_vf_nxv16f32_unmasked_commute(<vscale x 16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %negva, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %negva, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5823,11 +5415,9 @@ define <vscale x 16 x float> @vfnmadd_vf_nxv16f32_neg_splat_unmasked(<vscale x 1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negvb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negvb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5839,11 +5429,9 @@ define <vscale x 16 x float> @vfnmadd_vf_nxv16f32_neg_splat_unmasked_commute(<vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negvb, <vscale x 16 x float> %va, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negvb, <vscale x 16 x float> %va, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5881,11 +5469,9 @@ define <vscale x 16 x float> @vfnmsub_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5896,11 +5482,9 @@ define <vscale x 16 x float> @vfnmsub_vv_nxv16f32_unmasked_commuted(<vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negb, <vscale x 16 x float> %va, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negb, <vscale x 16 x float> %va, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5938,10 +5522,8 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negva, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negva, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5953,10 +5535,8 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32_unmasked_commute(<vscale x 16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %negva, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %negva, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5994,10 +5574,8 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32_neg_splat_unmasked(<vscale x 1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negvb, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negvb, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -6009,10 +5587,8 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute(<vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negvb, <vscale x 16 x float> %va, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %negvb, <vscale x 16 x float> %va, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -6036,10 +5612,8 @@ define <vscale x 1 x double> @vfmsub_vv_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6077,10 +5651,8 @@ define <vscale x 1 x double> @vfmsub_vf_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6092,10 +5664,8 @@ define <vscale x 1 x double> @vfmsub_vf_nxv1f64_unmasked_commute(<vscale x 1 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6130,11 +5700,9 @@ define <vscale x 1 x double> @vfnmadd_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6144,11 +5712,9 @@ define <vscale x 1 x double> @vfnmadd_vv_nxv1f64_unmasked_commuted(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negb, <vscale x 1 x double> %va, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negb, <vscale x 1 x double> %va, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6188,11 +5754,9 @@ define <vscale x 1 x double> @vfnmadd_vf_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negva, <vscale x 1 x double> %vb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negva, <vscale x 1 x double> %vb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6204,11 +5768,9 @@ define <vscale x 1 x double> @vfnmadd_vf_nxv1f64_unmasked_commute(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %negva, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %negva, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6248,11 +5810,9 @@ define <vscale x 1 x double> @vfnmadd_vf_nxv1f64_neg_splat_unmasked(<vscale x 1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negvb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negvb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6264,11 +5824,9 @@ define <vscale x 1 x double> @vfnmadd_vf_nxv1f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negvb, <vscale x 1 x double> %va, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negvb, <vscale x 1 x double> %va, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6303,11 +5861,9 @@ define <vscale x 1 x double> @vfnmsub_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6317,11 +5873,9 @@ define <vscale x 1 x double> @vfnmsub_vv_nxv1f64_unmasked_commuted(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negb, <vscale x 1 x double> %va, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negb, <vscale x 1 x double> %va, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6359,10 +5913,8 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negva, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negva, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6374,10 +5926,8 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64_unmasked_commute(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %negva, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %negva, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6415,10 +5965,8 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64_neg_splat_unmasked(<vscale x 1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negvb, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negvb, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6430,10 +5978,8 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negvb, <vscale x 1 x double> %va, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %negvb, <vscale x 1 x double> %va, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6457,10 +6003,8 @@ define <vscale x 2 x double> @vfmsub_vv_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6498,10 +6042,8 @@ define <vscale x 2 x double> @vfmsub_vf_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6513,10 +6055,8 @@ define <vscale x 2 x double> @vfmsub_vf_nxv2f64_unmasked_commute(<vscale x 2 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6551,11 +6091,9 @@ define <vscale x 2 x double> @vfnmadd_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6565,11 +6103,9 @@ define <vscale x 2 x double> @vfnmadd_vv_nxv2f64_unmasked_commuted(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negb, <vscale x 2 x double> %va, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negb, <vscale x 2 x double> %va, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6609,11 +6145,9 @@ define <vscale x 2 x double> @vfnmadd_vf_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negva, <vscale x 2 x double> %vb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negva, <vscale x 2 x double> %vb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6625,11 +6159,9 @@ define <vscale x 2 x double> @vfnmadd_vf_nxv2f64_unmasked_commute(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %negva, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %negva, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6669,11 +6201,9 @@ define <vscale x 2 x double> @vfnmadd_vf_nxv2f64_neg_splat_unmasked(<vscale x 2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negvb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negvb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6685,11 +6215,9 @@ define <vscale x 2 x double> @vfnmadd_vf_nxv2f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negvb, <vscale x 2 x double> %va, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negvb, <vscale x 2 x double> %va, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6724,11 +6252,9 @@ define <vscale x 2 x double> @vfnmsub_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6738,11 +6264,9 @@ define <vscale x 2 x double> @vfnmsub_vv_nxv2f64_unmasked_commuted(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negb, <vscale x 2 x double> %va, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negb, <vscale x 2 x double> %va, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6780,10 +6304,8 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negva, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negva, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6795,10 +6317,8 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64_unmasked_commute(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %negva, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %negva, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6836,10 +6356,8 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64_neg_splat_unmasked(<vscale x 2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negvb, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negvb, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6851,10 +6369,8 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negvb, <vscale x 2 x double> %va, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %negvb, <vscale x 2 x double> %va, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6878,10 +6394,8 @@ define <vscale x 4 x double> @vfmsub_vv_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -6919,10 +6433,8 @@ define <vscale x 4 x double> @vfmsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -6934,10 +6446,8 @@ define <vscale x 4 x double> @vfmsub_vf_nxv4f64_unmasked_commute(<vscale x 4 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -6972,11 +6482,9 @@ define <vscale x 4 x double> @vfnmadd_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -6986,11 +6494,9 @@ define <vscale x 4 x double> @vfnmadd_vv_nxv4f64_unmasked_commuted(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negb, <vscale x 4 x double> %va, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negb, <vscale x 4 x double> %va, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7030,11 +6536,9 @@ define <vscale x 4 x double> @vfnmadd_vf_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negva, <vscale x 4 x double> %vb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negva, <vscale x 4 x double> %vb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7046,11 +6550,9 @@ define <vscale x 4 x double> @vfnmadd_vf_nxv4f64_unmasked_commute(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %negva, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %negva, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7090,11 +6592,9 @@ define <vscale x 4 x double> @vfnmadd_vf_nxv4f64_neg_splat_unmasked(<vscale x 4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negvb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negvb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7106,11 +6606,9 @@ define <vscale x 4 x double> @vfnmadd_vf_nxv4f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negvb, <vscale x 4 x double> %va, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negvb, <vscale x 4 x double> %va, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7145,11 +6643,9 @@ define <vscale x 4 x double> @vfnmsub_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7159,11 +6655,9 @@ define <vscale x 4 x double> @vfnmsub_vv_nxv4f64_unmasked_commuted(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negb, <vscale x 4 x double> %va, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negb, <vscale x 4 x double> %va, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7201,10 +6695,8 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negva, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negva, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7216,10 +6708,8 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64_unmasked_commute(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %negva, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %negva, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7257,10 +6747,8 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64_neg_splat_unmasked(<vscale x 4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negvb, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negvb, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7272,10 +6760,8 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negvb, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %negvb, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7301,10 +6787,8 @@ define <vscale x 8 x double> @vfmsub_vv_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7342,10 +6826,8 @@ define <vscale x 8 x double> @vfmsub_vf_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7357,10 +6839,8 @@ define <vscale x 8 x double> @vfmsub_vf_nxv8f64_unmasked_commute(<vscale x 8 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7398,11 +6878,9 @@ define <vscale x 8 x double> @vfnmadd_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7413,11 +6891,9 @@ define <vscale x 8 x double> @vfnmadd_vv_nxv8f64_unmasked_commuted(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negb, <vscale x 8 x double> %va, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negb, <vscale x 8 x double> %va, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7457,11 +6933,9 @@ define <vscale x 8 x double> @vfnmadd_vf_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negva, <vscale x 8 x double> %vb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negva, <vscale x 8 x double> %vb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7473,11 +6947,9 @@ define <vscale x 8 x double> @vfnmadd_vf_nxv8f64_unmasked_commute(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %negva, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %negva, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7517,11 +6989,9 @@ define <vscale x 8 x double> @vfnmadd_vf_nxv8f64_neg_splat_unmasked(<vscale x 8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negvb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negvb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7533,11 +7003,9 @@ define <vscale x 8 x double> @vfnmadd_vf_nxv8f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negvb, <vscale x 8 x double> %va, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negvb, <vscale x 8 x double> %va, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7575,11 +7043,9 @@ define <vscale x 8 x double> @vfnmsub_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7590,11 +7056,9 @@ define <vscale x 8 x double> @vfnmsub_vv_nxv8f64_unmasked_commuted(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negb, <vscale x 8 x double> %va, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negb, <vscale x 8 x double> %va, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7632,10 +7096,8 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negva, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negva, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7647,10 +7109,8 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_unmasked_commute(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %negva, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %negva, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7688,10 +7148,8 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_neg_splat_unmasked(<vscale x 8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negvb, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negvb, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7703,10 +7161,8 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negvb, <vscale x 8 x double> %va, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %negvb, <vscale x 8 x double> %va, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll
index 6f1b3986caafea..54855e6152b95e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll
@@ -16,9 +16,7 @@ define <vscale x 1 x half> @vfmacc_vv_nxv1f16(<vscale x 1 x half> %a, <vscale x
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -30,10 +28,8 @@ define <vscale x 1 x half> @vfmacc_vv_nxv1f16_unmasked(<vscale x 1 x half> %a, <
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %allones, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
 
@@ -46,9 +42,7 @@ define <vscale x 1 x half> @vfmacc_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -62,9 +56,7 @@ define <vscale x 1 x half> @vfmacc_vf_nxv1f16_commute(<vscale x 1 x half> %va, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -78,10 +70,8 @@ define <vscale x 1 x half> @vfmacc_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %allones, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
 
@@ -92,9 +82,7 @@ define <vscale x 1 x half> @vfmacc_vv_nxv1f16_ta(<vscale x 1 x half> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -108,9 +96,7 @@ define <vscale x 1 x half> @vfmacc_vf_nxv1f16_ta(<vscale x 1 x half> %va, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -124,9 +110,7 @@ define <vscale x 1 x half> @vfmacc_vf_nxv1f16_commute_ta(<vscale x 1 x half> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -143,9 +127,7 @@ define <vscale x 2 x half> @vfmacc_vv_nxv2f16(<vscale x 2 x half> %a, <vscale x
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -157,10 +139,8 @@ define <vscale x 2 x half> @vfmacc_vv_nxv2f16_unmasked(<vscale x 2 x half> %a, <
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %allones, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
 
@@ -173,9 +153,7 @@ define <vscale x 2 x half> @vfmacc_vf_nxv2f16(<vscale x 2 x half> %va, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -189,9 +167,7 @@ define <vscale x 2 x half> @vfmacc_vf_nxv2f16_commute(<vscale x 2 x half> %va, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -205,10 +181,8 @@ define <vscale x 2 x half> @vfmacc_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %allones, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
 
@@ -219,9 +193,7 @@ define <vscale x 2 x half> @vfmacc_vv_nxv2f16_ta(<vscale x 2 x half> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -235,9 +207,7 @@ define <vscale x 2 x half> @vfmacc_vf_nxv2f16_ta(<vscale x 2 x half> %va, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -251,9 +221,7 @@ define <vscale x 2 x half> @vfmacc_vf_nxv2f16_commute_ta(<vscale x 2 x half> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -270,9 +238,7 @@ define <vscale x 4 x half> @vfmacc_vv_nxv4f16(<vscale x 4 x half> %a, <vscale x
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -284,10 +250,8 @@ define <vscale x 4 x half> @vfmacc_vv_nxv4f16_unmasked(<vscale x 4 x half> %a, <
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %allones, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
 
@@ -300,9 +264,7 @@ define <vscale x 4 x half> @vfmacc_vf_nxv4f16(<vscale x 4 x half> %va, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -316,9 +278,7 @@ define <vscale x 4 x half> @vfmacc_vf_nxv4f16_commute(<vscale x 4 x half> %va, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -332,10 +292,8 @@ define <vscale x 4 x half> @vfmacc_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %allones, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
 
@@ -346,9 +304,7 @@ define <vscale x 4 x half> @vfmacc_vv_nxv4f16_ta(<vscale x 4 x half> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -362,9 +318,7 @@ define <vscale x 4 x half> @vfmacc_vf_nxv4f16_ta(<vscale x 4 x half> %va, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -378,9 +332,7 @@ define <vscale x 4 x half> @vfmacc_vf_nxv4f16_commute_ta(<vscale x 4 x half> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -397,9 +349,7 @@ define <vscale x 8 x half> @vfmacc_vv_nxv8f16(<vscale x 8 x half> %a, <vscale x
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -411,10 +361,8 @@ define <vscale x 8 x half> @vfmacc_vv_nxv8f16_unmasked(<vscale x 8 x half> %a, <
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %allones, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
 
@@ -427,9 +375,7 @@ define <vscale x 8 x half> @vfmacc_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -443,9 +389,7 @@ define <vscale x 8 x half> @vfmacc_vf_nxv8f16_commute(<vscale x 8 x half> %va, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -459,10 +403,8 @@ define <vscale x 8 x half> @vfmacc_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %allones, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
 
@@ -473,9 +415,7 @@ define <vscale x 8 x half> @vfmacc_vv_nxv8f16_ta(<vscale x 8 x half> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -489,9 +429,7 @@ define <vscale x 8 x half> @vfmacc_vf_nxv8f16_ta(<vscale x 8 x half> %va, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -505,9 +443,7 @@ define <vscale x 8 x half> @vfmacc_vf_nxv8f16_commute_ta(<vscale x 8 x half> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -524,9 +460,7 @@ define <vscale x 16 x half> @vfmacc_vv_nxv16f16(<vscale x 16 x half> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -538,10 +472,8 @@ define <vscale x 16 x half> @vfmacc_vv_nxv16f16_unmasked(<vscale x 16 x half> %a
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %allones, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
 
@@ -554,9 +486,7 @@ define <vscale x 16 x half> @vfmacc_vf_nxv16f16(<vscale x 16 x half> %va, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -570,9 +500,7 @@ define <vscale x 16 x half> @vfmacc_vf_nxv16f16_commute(<vscale x 16 x half> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -586,10 +514,8 @@ define <vscale x 16 x half> @vfmacc_vf_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %allones, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
 
@@ -600,9 +526,7 @@ define <vscale x 16 x half> @vfmacc_vv_nxv16f16_ta(<vscale x 16 x half> %a, <vsc
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -616,9 +540,7 @@ define <vscale x 16 x half> @vfmacc_vf_nxv16f16_ta(<vscale x 16 x half> %va, hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -632,9 +554,7 @@ define <vscale x 16 x half> @vfmacc_vf_nxv16f16_commute_ta(<vscale x 16 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -652,9 +572,7 @@ define <vscale x 32 x half> @vfmacc_vv_nxv32f16(<vscale x 32 x half> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -667,10 +585,8 @@ define <vscale x 32 x half> @vfmacc_vv_nxv32f16_unmasked(<vscale x 32 x half> %a
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %allones, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
 
@@ -683,9 +599,7 @@ define <vscale x 32 x half> @vfmacc_vf_nxv32f16(<vscale x 32 x half> %va, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -699,9 +613,7 @@ define <vscale x 32 x half> @vfmacc_vf_nxv32f16_commute(<vscale x 32 x half> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -715,10 +627,8 @@ define <vscale x 32 x half> @vfmacc_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %allones, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
 
@@ -730,9 +640,7 @@ define <vscale x 32 x half> @vfmacc_vv_nxv32f16_ta(<vscale x 32 x half> %a, <vsc
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -746,9 +654,7 @@ define <vscale x 32 x half> @vfmacc_vf_nxv32f16_ta(<vscale x 32 x half> %va, hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -762,9 +668,7 @@ define <vscale x 32 x half> @vfmacc_vf_nxv32f16_commute_ta(<vscale x 32 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -781,9 +685,7 @@ define <vscale x 1 x float> @vfmacc_vv_nxv1f32(<vscale x 1 x float> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -795,10 +697,8 @@ define <vscale x 1 x float> @vfmacc_vv_nxv1f32_unmasked(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -811,9 +711,7 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32(<vscale x 1 x float> %va, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -827,9 +725,7 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32_commute(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -843,10 +739,8 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -857,9 +751,7 @@ define <vscale x 1 x float> @vfmacc_vv_nxv1f32_ta(<vscale x 1 x float> %a, <vsca
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -873,9 +765,7 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32_ta(<vscale x 1 x float> %va, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -889,9 +779,7 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32_commute_ta(<vscale x 1 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -908,9 +796,7 @@ define <vscale x 2 x float> @vfmacc_vv_nxv2f32(<vscale x 2 x float> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -922,10 +808,8 @@ define <vscale x 2 x float> @vfmacc_vv_nxv2f32_unmasked(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %allones, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
 
@@ -938,9 +822,7 @@ define <vscale x 2 x float> @vfmacc_vf_nxv2f32(<vscale x 2 x float> %va, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -954,9 +836,7 @@ define <vscale x 2 x float> @vfmacc_vf_nxv2f32_commute(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -970,10 +850,8 @@ define <vscale x 2 x float> @vfmacc_vf_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %allones, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
 
@@ -984,9 +862,7 @@ define <vscale x 2 x float> @vfmacc_vv_nxv2f32_ta(<vscale x 2 x float> %a, <vsca
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1000,9 +876,7 @@ define <vscale x 2 x float> @vfmacc_vf_nxv2f32_ta(<vscale x 2 x float> %va, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1016,9 +890,7 @@ define <vscale x 2 x float> @vfmacc_vf_nxv2f32_commute_ta(<vscale x 2 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1035,9 +907,7 @@ define <vscale x 4 x float> @vfmacc_vv_nxv4f32(<vscale x 4 x float> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1049,10 +919,8 @@ define <vscale x 4 x float> @vfmacc_vv_nxv4f32_unmasked(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %allones, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
 
@@ -1065,9 +933,7 @@ define <vscale x 4 x float> @vfmacc_vf_nxv4f32(<vscale x 4 x float> %va, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1081,9 +947,7 @@ define <vscale x 4 x float> @vfmacc_vf_nxv4f32_commute(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1097,10 +961,8 @@ define <vscale x 4 x float> @vfmacc_vf_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %allones, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
 
@@ -1111,9 +973,7 @@ define <vscale x 4 x float> @vfmacc_vv_nxv4f32_ta(<vscale x 4 x float> %a, <vsca
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1127,9 +987,7 @@ define <vscale x 4 x float> @vfmacc_vf_nxv4f32_ta(<vscale x 4 x float> %va, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1143,9 +1001,7 @@ define <vscale x 4 x float> @vfmacc_vf_nxv4f32_commute_ta(<vscale x 4 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1162,9 +1018,7 @@ define <vscale x 8 x float> @vfmacc_vv_nxv8f32(<vscale x 8 x float> %a, <vscale
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1176,10 +1030,8 @@ define <vscale x 8 x float> @vfmacc_vv_nxv8f32_unmasked(<vscale x 8 x float> %a,
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %allones, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
 
@@ -1192,9 +1044,7 @@ define <vscale x 8 x float> @vfmacc_vf_nxv8f32(<vscale x 8 x float> %va, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1208,9 +1058,7 @@ define <vscale x 8 x float> @vfmacc_vf_nxv8f32_commute(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1224,10 +1072,8 @@ define <vscale x 8 x float> @vfmacc_vf_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %allones, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
 
@@ -1238,9 +1084,7 @@ define <vscale x 8 x float> @vfmacc_vv_nxv8f32_ta(<vscale x 8 x float> %a, <vsca
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1254,9 +1098,7 @@ define <vscale x 8 x float> @vfmacc_vf_nxv8f32_ta(<vscale x 8 x float> %va, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1270,9 +1112,7 @@ define <vscale x 8 x float> @vfmacc_vf_nxv8f32_commute_ta(<vscale x 8 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1290,9 +1130,7 @@ define <vscale x 16 x float> @vfmacc_vv_nxv16f32(<vscale x 16 x float> %a, <vsca
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1305,10 +1143,8 @@ define <vscale x 16 x float> @vfmacc_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %allones, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
 
@@ -1321,9 +1157,7 @@ define <vscale x 16 x float> @vfmacc_vf_nxv16f32(<vscale x 16 x float> %va, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1337,9 +1171,7 @@ define <vscale x 16 x float> @vfmacc_vf_nxv16f32_commute(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1353,10 +1185,8 @@ define <vscale x 16 x float> @vfmacc_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %allones, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
 
@@ -1368,9 +1198,7 @@ define <vscale x 16 x float> @vfmacc_vv_nxv16f32_ta(<vscale x 16 x float> %a, <v
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1384,9 +1212,7 @@ define <vscale x 16 x float> @vfmacc_vf_nxv16f32_ta(<vscale x 16 x float> %va, f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1400,9 +1226,7 @@ define <vscale x 16 x float> @vfmacc_vf_nxv16f32_commute_ta(<vscale x 16 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1419,9 +1243,7 @@ define <vscale x 1 x double> @vfmacc_vv_nxv1f64(<vscale x 1 x double> %a, <vscal
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1433,10 +1255,8 @@ define <vscale x 1 x double> @vfmacc_vv_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %allones, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
 
@@ -1449,9 +1269,7 @@ define <vscale x 1 x double> @vfmacc_vf_nxv1f64(<vscale x 1 x double> %va, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1465,9 +1283,7 @@ define <vscale x 1 x double> @vfmacc_vf_nxv1f64_commute(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1481,10 +1297,8 @@ define <vscale x 1 x double> @vfmacc_vf_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %allones, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
 
@@ -1495,9 +1309,7 @@ define <vscale x 1 x double> @vfmacc_vv_nxv1f64_ta(<vscale x 1 x double> %a, <vs
 ; CHECK-NEXT:    vfmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1511,9 +1323,7 @@ define <vscale x 1 x double> @vfmacc_vf_nxv1f64_ta(<vscale x 1 x double> %va, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1527,9 +1337,7 @@ define <vscale x 1 x double> @vfmacc_vf_nxv1f64_commute_ta(<vscale x 1 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1546,9 +1354,7 @@ define <vscale x 2 x double> @vfmacc_vv_nxv2f64(<vscale x 2 x double> %a, <vscal
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1560,10 +1366,8 @@ define <vscale x 2 x double> @vfmacc_vv_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %allones, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
 
@@ -1576,9 +1380,7 @@ define <vscale x 2 x double> @vfmacc_vf_nxv2f64(<vscale x 2 x double> %va, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1592,9 +1394,7 @@ define <vscale x 2 x double> @vfmacc_vf_nxv2f64_commute(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1608,10 +1408,8 @@ define <vscale x 2 x double> @vfmacc_vf_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %allones, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
 
@@ -1622,9 +1420,7 @@ define <vscale x 2 x double> @vfmacc_vv_nxv2f64_ta(<vscale x 2 x double> %a, <vs
 ; CHECK-NEXT:    vfmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1638,9 +1434,7 @@ define <vscale x 2 x double> @vfmacc_vf_nxv2f64_ta(<vscale x 2 x double> %va, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1654,9 +1448,7 @@ define <vscale x 2 x double> @vfmacc_vf_nxv2f64_commute_ta(<vscale x 2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1673,9 +1465,7 @@ define <vscale x 4 x double> @vfmacc_vv_nxv4f64(<vscale x 4 x double> %a, <vscal
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1687,10 +1477,8 @@ define <vscale x 4 x double> @vfmacc_vv_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %allones, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
 
@@ -1703,9 +1491,7 @@ define <vscale x 4 x double> @vfmacc_vf_nxv4f64(<vscale x 4 x double> %va, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1719,9 +1505,7 @@ define <vscale x 4 x double> @vfmacc_vf_nxv4f64_commute(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1735,10 +1519,8 @@ define <vscale x 4 x double> @vfmacc_vf_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %allones, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
 
@@ -1749,9 +1531,7 @@ define <vscale x 4 x double> @vfmacc_vv_nxv4f64_ta(<vscale x 4 x double> %a, <vs
 ; CHECK-NEXT:    vfmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1765,9 +1545,7 @@ define <vscale x 4 x double> @vfmacc_vf_nxv4f64_ta(<vscale x 4 x double> %va, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1781,9 +1559,7 @@ define <vscale x 4 x double> @vfmacc_vf_nxv4f64_commute_ta(<vscale x 4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1801,9 +1577,7 @@ define <vscale x 8 x double> @vfmacc_vv_nxv8f64(<vscale x 8 x double> %a, <vscal
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1816,10 +1590,8 @@ define <vscale x 8 x double> @vfmacc_vv_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %allones, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
 
@@ -1832,9 +1604,7 @@ define <vscale x 8 x double> @vfmacc_vf_nxv8f64(<vscale x 8 x double> %va, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1848,9 +1618,7 @@ define <vscale x 8 x double> @vfmacc_vf_nxv8f64_commute(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1864,10 +1632,8 @@ define <vscale x 8 x double> @vfmacc_vf_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %allones, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
 
@@ -1879,9 +1645,7 @@ define <vscale x 8 x double> @vfmacc_vv_nxv8f64_ta(<vscale x 8 x double> %a, <vs
 ; CHECK-NEXT:    vfmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1895,9 +1659,7 @@ define <vscale x 8 x double> @vfmacc_vf_nxv8f64_ta(<vscale x 8 x double> %va, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1911,9 +1673,7 @@ define <vscale x 8 x double> @vfmacc_vf_nxv8f64_commute_ta(<vscale x 8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index 72101d62567b4d..7556b3ace5c6cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -48,9 +48,7 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.maxnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.maxnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -94,9 +92,7 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.maxnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.maxnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -140,9 +136,7 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.maxnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.maxnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -186,9 +180,7 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.maxnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.maxnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -232,9 +224,7 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.maxnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.maxnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -311,8 +301,6 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -320,6 +308,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -348,9 +338,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -372,9 +360,7 @@ define <vscale x 1 x float> @vfmax_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.maxnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.maxnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -396,9 +382,7 @@ define <vscale x 2 x float> @vfmax_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.maxnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.maxnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -420,9 +404,7 @@ define <vscale x 4 x float> @vfmax_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.maxnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.maxnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -444,9 +426,7 @@ define <vscale x 8 x float> @vfmax_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.maxnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.maxnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -468,9 +448,7 @@ define <vscale x 16 x float> @vfmax_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.maxnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.maxnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -492,9 +470,7 @@ define <vscale x 1 x double> @vfmax_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.maxnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.maxnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -516,9 +492,7 @@ define <vscale x 2 x double> @vfmax_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.maxnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.maxnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -540,9 +514,7 @@ define <vscale x 4 x double> @vfmax_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.maxnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.maxnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -564,8 +536,6 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.maxnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.maxnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index 15fa24a35b7d59..755c665376128a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -48,9 +48,7 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.minnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.minnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -94,9 +92,7 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.minnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.minnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -140,9 +136,7 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.minnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.minnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -186,9 +180,7 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.minnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.minnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -232,9 +224,7 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.minnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.minnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -311,8 +301,6 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -320,6 +308,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -348,9 +338,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -372,9 +360,7 @@ define <vscale x 1 x float> @vfmin_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.minnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.minnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -396,9 +382,7 @@ define <vscale x 2 x float> @vfmin_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.minnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.minnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -420,9 +404,7 @@ define <vscale x 4 x float> @vfmin_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.minnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.minnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -444,9 +426,7 @@ define <vscale x 8 x float> @vfmin_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.minnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.minnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -468,9 +448,7 @@ define <vscale x 16 x float> @vfmin_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.minnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.minnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -492,9 +470,7 @@ define <vscale x 1 x double> @vfmin_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.minnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.minnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -516,9 +492,7 @@ define <vscale x 2 x double> @vfmin_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.minnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.minnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -540,9 +514,7 @@ define <vscale x 4 x double> @vfmin_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.minnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.minnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -564,8 +536,6 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.minnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.minnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll
index 1ce21a1c463316..f1d5562131b8e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll
@@ -16,10 +16,8 @@ define <vscale x 1 x half> @vmfsac_vv_nxv1f16(<vscale x 1 x half> %a, <vscale x
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -31,11 +29,9 @@ define <vscale x 1 x half> @vmfsac_vv_nxv1f16_unmasked(<vscale x 1 x half> %a, <
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %allones, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
 
@@ -48,10 +44,8 @@ define <vscale x 1 x half> @vmfsac_vf_nxv1f16(<vscale x 1 x half> %a, half %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -65,10 +59,8 @@ define <vscale x 1 x half> @vmfsac_vf_nxv1f16_commute(<vscale x 1 x half> %a, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %a, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %a, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -82,11 +74,9 @@ define <vscale x 1 x half> @vmfsac_vf_nxv1f16_unmasked(<vscale x 1 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %allones, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
 
@@ -97,10 +87,8 @@ define <vscale x 1 x half> @vmfsac_vv_nxv1f16_ta(<vscale x 1 x half> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -114,10 +102,8 @@ define <vscale x 1 x half> @vmfsac_vf_nxv1f16_ta(<vscale x 1 x half> %a, half %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -131,10 +117,8 @@ define <vscale x 1 x half> @vmfsac_vf_nxv1f16_commute_ta(<vscale x 1 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %a, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %a, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -151,10 +135,8 @@ define <vscale x 2 x half> @vmfsac_vv_nxv2f16(<vscale x 2 x half> %a, <vscale x
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -166,11 +148,9 @@ define <vscale x 2 x half> @vmfsac_vv_nxv2f16_unmasked(<vscale x 2 x half> %a, <
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %allones, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
 
@@ -183,10 +163,8 @@ define <vscale x 2 x half> @vmfsac_vf_nxv2f16(<vscale x 2 x half> %a, half %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -200,10 +178,8 @@ define <vscale x 2 x half> @vmfsac_vf_nxv2f16_commute(<vscale x 2 x half> %a, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %a, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %a, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -217,11 +193,9 @@ define <vscale x 2 x half> @vmfsac_vf_nxv2f16_unmasked(<vscale x 2 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %allones, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
 
@@ -232,10 +206,8 @@ define <vscale x 2 x half> @vmfsac_vv_nxv2f16_ta(<vscale x 2 x half> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -249,10 +221,8 @@ define <vscale x 2 x half> @vmfsac_vf_nxv2f16_ta(<vscale x 2 x half> %a, half %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -266,10 +236,8 @@ define <vscale x 2 x half> @vmfsac_vf_nxv2f16_commute_ta(<vscale x 2 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %a, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %a, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -286,10 +254,8 @@ define <vscale x 4 x half> @vmfsac_vv_nxv4f16(<vscale x 4 x half> %a, <vscale x
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -301,11 +267,9 @@ define <vscale x 4 x half> @vmfsac_vv_nxv4f16_unmasked(<vscale x 4 x half> %a, <
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %allones, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
 
@@ -318,10 +282,8 @@ define <vscale x 4 x half> @vmfsac_vf_nxv4f16(<vscale x 4 x half> %a, half %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -335,10 +297,8 @@ define <vscale x 4 x half> @vmfsac_vf_nxv4f16_commute(<vscale x 4 x half> %a, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %a, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %a, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -352,11 +312,9 @@ define <vscale x 4 x half> @vmfsac_vf_nxv4f16_unmasked(<vscale x 4 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %allones, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
 
@@ -367,10 +325,8 @@ define <vscale x 4 x half> @vmfsac_vv_nxv4f16_ta(<vscale x 4 x half> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -384,10 +340,8 @@ define <vscale x 4 x half> @vmfsac_vf_nxv4f16_ta(<vscale x 4 x half> %a, half %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -401,10 +355,8 @@ define <vscale x 4 x half> @vmfsac_vf_nxv4f16_commute_ta(<vscale x 4 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %a, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %a, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -421,10 +373,8 @@ define <vscale x 8 x half> @vmfsac_vv_nxv8f16(<vscale x 8 x half> %a, <vscale x
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -436,11 +386,9 @@ define <vscale x 8 x half> @vmfsac_vv_nxv8f16_unmasked(<vscale x 8 x half> %a, <
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %allones, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
 
@@ -453,10 +401,8 @@ define <vscale x 8 x half> @vmfsac_vf_nxv8f16(<vscale x 8 x half> %a, half %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -470,10 +416,8 @@ define <vscale x 8 x half> @vmfsac_vf_nxv8f16_commute(<vscale x 8 x half> %a, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %a, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %a, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -487,11 +431,9 @@ define <vscale x 8 x half> @vmfsac_vf_nxv8f16_unmasked(<vscale x 8 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %allones, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
 
@@ -502,10 +444,8 @@ define <vscale x 8 x half> @vmfsac_vv_nxv8f16_ta(<vscale x 8 x half> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -519,10 +459,8 @@ define <vscale x 8 x half> @vmfsac_vf_nxv8f16_ta(<vscale x 8 x half> %a, half %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -536,10 +474,8 @@ define <vscale x 8 x half> @vmfsac_vf_nxv8f16_commute_ta(<vscale x 8 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %a, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %a, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -556,10 +492,8 @@ define <vscale x 16 x half> @vmfsac_vv_nxv16f16(<vscale x 16 x half> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -571,11 +505,9 @@ define <vscale x 16 x half> @vmfsac_vv_nxv16f16_unmasked(<vscale x 16 x half> %a
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %allones, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
 
@@ -588,10 +520,8 @@ define <vscale x 16 x half> @vmfsac_vf_nxv16f16(<vscale x 16 x half> %a, half %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -605,10 +535,8 @@ define <vscale x 16 x half> @vmfsac_vf_nxv16f16_commute(<vscale x 16 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %a, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %a, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -622,11 +550,9 @@ define <vscale x 16 x half> @vmfsac_vf_nxv16f16_unmasked(<vscale x 16 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %allones, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
 
@@ -637,10 +563,8 @@ define <vscale x 16 x half> @vmfsac_vv_nxv16f16_ta(<vscale x 16 x half> %a, <vsc
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -654,10 +578,8 @@ define <vscale x 16 x half> @vmfsac_vf_nxv16f16_ta(<vscale x 16 x half> %a, half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -671,10 +593,8 @@ define <vscale x 16 x half> @vmfsac_vf_nxv16f16_commute_ta(<vscale x 16 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %a, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %a, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -692,10 +612,8 @@ define <vscale x 32 x half> @vmfsac_vv_nxv32f16(<vscale x 32 x half> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -708,11 +626,9 @@ define <vscale x 32 x half> @vmfsac_vv_nxv32f16_unmasked(<vscale x 32 x half> %a
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %allones, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
 
@@ -725,10 +641,8 @@ define <vscale x 32 x half> @vmfsac_vf_nxv32f16(<vscale x 32 x half> %a, half %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -742,10 +656,8 @@ define <vscale x 32 x half> @vmfsac_vf_nxv32f16_commute(<vscale x 32 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %a, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %a, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -759,11 +671,9 @@ define <vscale x 32 x half> @vmfsac_vf_nxv32f16_unmasked(<vscale x 32 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %allones, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
 
@@ -775,10 +685,8 @@ define <vscale x 32 x half> @vmfsac_vv_nxv32f16_ta(<vscale x 32 x half> %a, <vsc
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -792,10 +700,8 @@ define <vscale x 32 x half> @vmfsac_vf_nxv32f16_ta(<vscale x 32 x half> %a, half
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -809,10 +715,8 @@ define <vscale x 32 x half> @vmfsac_vf_nxv32f16_commute_ta(<vscale x 32 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %a, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %a, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -829,10 +733,8 @@ define <vscale x 1 x float> @vmfsac_vv_nxv1f32(<vscale x 1 x float> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -844,11 +746,9 @@ define <vscale x 1 x float> @vmfsac_vv_nxv1f32_unmasked(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -861,10 +761,8 @@ define <vscale x 1 x float> @vmfsac_vf_nxv1f32(<vscale x 1 x float> %a, float %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -878,10 +776,8 @@ define <vscale x 1 x float> @vmfsac_vf_nxv1f32_commute(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %a, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %a, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -895,11 +791,9 @@ define <vscale x 1 x float> @vmfsac_vf_nxv1f32_unmasked(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -910,10 +804,8 @@ define <vscale x 1 x float> @vmfsac_vv_nxv1f32_ta(<vscale x 1 x float> %a, <vsca
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -927,10 +819,8 @@ define <vscale x 1 x float> @vmfsac_vf_nxv1f32_ta(<vscale x 1 x float> %a, float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -944,10 +834,8 @@ define <vscale x 1 x float> @vmfsac_vf_nxv1f32_commute_ta(<vscale x 1 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %a, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %a, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -964,10 +852,8 @@ define <vscale x 2 x float> @vmfsac_vv_nxv2f32(<vscale x 2 x float> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -979,11 +865,9 @@ define <vscale x 2 x float> @vmfsac_vv_nxv2f32_unmasked(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %allones, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
 
@@ -996,10 +880,8 @@ define <vscale x 2 x float> @vmfsac_vf_nxv2f32(<vscale x 2 x float> %a, float %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1013,10 +895,8 @@ define <vscale x 2 x float> @vmfsac_vf_nxv2f32_commute(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %a, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %a, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1030,11 +910,9 @@ define <vscale x 2 x float> @vmfsac_vf_nxv2f32_unmasked(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %allones, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
 
@@ -1045,10 +923,8 @@ define <vscale x 2 x float> @vmfsac_vv_nxv2f32_ta(<vscale x 2 x float> %a, <vsca
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1062,10 +938,8 @@ define <vscale x 2 x float> @vmfsac_vf_nxv2f32_ta(<vscale x 2 x float> %a, float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1079,10 +953,8 @@ define <vscale x 2 x float> @vmfsac_vf_nxv2f32_commute_ta(<vscale x 2 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %a, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %a, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1099,10 +971,8 @@ define <vscale x 4 x float> @vmfsac_vv_nxv4f32(<vscale x 4 x float> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1114,11 +984,9 @@ define <vscale x 4 x float> @vmfsac_vv_nxv4f32_unmasked(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %allones, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
 
@@ -1131,10 +999,8 @@ define <vscale x 4 x float> @vmfsac_vf_nxv4f32(<vscale x 4 x float> %a, float %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1148,10 +1014,8 @@ define <vscale x 4 x float> @vmfsac_vf_nxv4f32_commute(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %a, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %a, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1165,11 +1029,9 @@ define <vscale x 4 x float> @vmfsac_vf_nxv4f32_unmasked(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %allones, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
 
@@ -1180,10 +1042,8 @@ define <vscale x 4 x float> @vmfsac_vv_nxv4f32_ta(<vscale x 4 x float> %a, <vsca
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1197,10 +1057,8 @@ define <vscale x 4 x float> @vmfsac_vf_nxv4f32_ta(<vscale x 4 x float> %a, float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1214,10 +1072,8 @@ define <vscale x 4 x float> @vmfsac_vf_nxv4f32_commute_ta(<vscale x 4 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %a, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %a, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1234,10 +1090,8 @@ define <vscale x 8 x float> @vmfsac_vv_nxv8f32(<vscale x 8 x float> %a, <vscale
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1249,11 +1103,9 @@ define <vscale x 8 x float> @vmfsac_vv_nxv8f32_unmasked(<vscale x 8 x float> %a,
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %allones, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
 
@@ -1266,10 +1118,8 @@ define <vscale x 8 x float> @vmfsac_vf_nxv8f32(<vscale x 8 x float> %a, float %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1283,10 +1133,8 @@ define <vscale x 8 x float> @vmfsac_vf_nxv8f32_commute(<vscale x 8 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %a, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %a, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1300,11 +1148,9 @@ define <vscale x 8 x float> @vmfsac_vf_nxv8f32_unmasked(<vscale x 8 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %allones, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
 
@@ -1315,10 +1161,8 @@ define <vscale x 8 x float> @vmfsac_vv_nxv8f32_ta(<vscale x 8 x float> %a, <vsca
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1332,10 +1176,8 @@ define <vscale x 8 x float> @vmfsac_vf_nxv8f32_ta(<vscale x 8 x float> %a, float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1349,10 +1191,8 @@ define <vscale x 8 x float> @vmfsac_vf_nxv8f32_commute_ta(<vscale x 8 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %a, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %a, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1370,10 +1210,8 @@ define <vscale x 16 x float> @vmfsac_vv_nxv16f32(<vscale x 16 x float> %a, <vsca
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1386,11 +1224,9 @@ define <vscale x 16 x float> @vmfsac_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %allones, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
 
@@ -1403,10 +1239,8 @@ define <vscale x 16 x float> @vmfsac_vf_nxv16f32(<vscale x 16 x float> %a, float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1420,10 +1254,8 @@ define <vscale x 16 x float> @vmfsac_vf_nxv16f32_commute(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %a, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %a, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1437,11 +1269,9 @@ define <vscale x 16 x float> @vmfsac_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %allones, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
 
@@ -1453,10 +1283,8 @@ define <vscale x 16 x float> @vmfsac_vv_nxv16f32_ta(<vscale x 16 x float> %a, <v
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1470,10 +1298,8 @@ define <vscale x 16 x float> @vmfsac_vf_nxv16f32_ta(<vscale x 16 x float> %a, fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1487,10 +1313,8 @@ define <vscale x 16 x float> @vmfsac_vf_nxv16f32_commute_ta(<vscale x 16 x float
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %a, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %a, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1507,10 +1331,8 @@ define <vscale x 1 x double> @vmfsac_vv_nxv1f64(<vscale x 1 x double> %a, <vscal
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1522,11 +1344,9 @@ define <vscale x 1 x double> @vmfsac_vv_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %allones, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
 
@@ -1539,10 +1359,8 @@ define <vscale x 1 x double> @vmfsac_vf_nxv1f64(<vscale x 1 x double> %a, double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1556,10 +1374,8 @@ define <vscale x 1 x double> @vmfsac_vf_nxv1f64_commute(<vscale x 1 x double> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %a, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %a, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1573,11 +1389,9 @@ define <vscale x 1 x double> @vmfsac_vf_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %allones, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
 
@@ -1588,10 +1402,8 @@ define <vscale x 1 x double> @vmfsac_vv_nxv1f64_ta(<vscale x 1 x double> %a, <vs
 ; CHECK-NEXT:    vfmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1605,10 +1417,8 @@ define <vscale x 1 x double> @vmfsac_vf_nxv1f64_ta(<vscale x 1 x double> %a, dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1622,10 +1432,8 @@ define <vscale x 1 x double> @vmfsac_vf_nxv1f64_commute_ta(<vscale x 1 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %a, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %a, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1642,10 +1450,8 @@ define <vscale x 2 x double> @vmfsac_vv_nxv2f64(<vscale x 2 x double> %a, <vscal
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1657,11 +1463,9 @@ define <vscale x 2 x double> @vmfsac_vv_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %allones, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
 
@@ -1674,10 +1478,8 @@ define <vscale x 2 x double> @vmfsac_vf_nxv2f64(<vscale x 2 x double> %a, double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1691,10 +1493,8 @@ define <vscale x 2 x double> @vmfsac_vf_nxv2f64_commute(<vscale x 2 x double> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %a, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %a, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1708,11 +1508,9 @@ define <vscale x 2 x double> @vmfsac_vf_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %allones, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
 
@@ -1723,10 +1521,8 @@ define <vscale x 2 x double> @vmfsac_vv_nxv2f64_ta(<vscale x 2 x double> %a, <vs
 ; CHECK-NEXT:    vfmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1740,10 +1536,8 @@ define <vscale x 2 x double> @vmfsac_vf_nxv2f64_ta(<vscale x 2 x double> %a, dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1757,10 +1551,8 @@ define <vscale x 2 x double> @vmfsac_vf_nxv2f64_commute_ta(<vscale x 2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %a, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %a, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1777,10 +1569,8 @@ define <vscale x 4 x double> @vmfsac_vv_nxv4f64(<vscale x 4 x double> %a, <vscal
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1792,11 +1582,9 @@ define <vscale x 4 x double> @vmfsac_vv_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %allones, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
 
@@ -1809,10 +1597,8 @@ define <vscale x 4 x double> @vmfsac_vf_nxv4f64(<vscale x 4 x double> %a, double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1826,10 +1612,8 @@ define <vscale x 4 x double> @vmfsac_vf_nxv4f64_commute(<vscale x 4 x double> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %a, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %a, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1843,11 +1627,9 @@ define <vscale x 4 x double> @vmfsac_vf_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %allones, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
 
@@ -1858,10 +1640,8 @@ define <vscale x 4 x double> @vmfsac_vv_nxv4f64_ta(<vscale x 4 x double> %a, <vs
 ; CHECK-NEXT:    vfmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1875,10 +1655,8 @@ define <vscale x 4 x double> @vmfsac_vf_nxv4f64_ta(<vscale x 4 x double> %a, dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1892,10 +1670,8 @@ define <vscale x 4 x double> @vmfsac_vf_nxv4f64_commute_ta(<vscale x 4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %a, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %a, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1913,10 +1689,8 @@ define <vscale x 8 x double> @vmfsac_vv_nxv8f64(<vscale x 8 x double> %a, <vscal
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1929,11 +1703,9 @@ define <vscale x 8 x double> @vmfsac_vv_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %allones, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
 
@@ -1946,10 +1718,8 @@ define <vscale x 8 x double> @vmfsac_vf_nxv8f64(<vscale x 8 x double> %a, double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1963,10 +1733,8 @@ define <vscale x 8 x double> @vmfsac_vf_nxv8f64_commute(<vscale x 8 x double> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %a, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %a, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1980,11 +1748,9 @@ define <vscale x 8 x double> @vmfsac_vf_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %allones, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
 
@@ -1996,10 +1762,8 @@ define <vscale x 8 x double> @vmfsac_vv_nxv8f64_ta(<vscale x 8 x double> %a, <vs
 ; CHECK-NEXT:    vfmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2013,10 +1777,8 @@ define <vscale x 8 x double> @vmfsac_vf_nxv8f64_ta(<vscale x 8 x double> %a, dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2030,10 +1792,8 @@ define <vscale x 8 x double> @vmfsac_vf_nxv8f64_commute_ta(<vscale x 8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %a, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %a, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll
index 518c1eacf401f4..f7f5e88bf8712f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll
@@ -564,9 +564,7 @@ define <vscale x 8 x float> @vfmul_vv_mask_nxv8f32(<vscale x 8 x float> %va, <vs
 ; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float 0.0, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> %splat
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> splat (float 0.0)
   %vc = fmul <vscale x 8 x float> %va, %vs
   ret <vscale x 8 x float> %vc
 }
@@ -579,11 +577,9 @@ define <vscale x 8 x float> @vfmul_vf_mask_nxv8f32(<vscale x 8 x float> %va, flo
 ; CHECK-NEXT:    vfmerge.vfm v12, v12, fa0, v0
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head0 = insertelement <vscale x 8 x float> poison, float 0.0, i32 0
-  %splat0 = shufflevector <vscale x 8 x float> %head0, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
   %head1 = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat1 = shufflevector <vscale x 8 x float> %head1, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> %splat0
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> splat (float 0.0)
   %vc = fmul <vscale x 8 x float> %va, %vs
   ret <vscale x 8 x float> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index bb9d3cfed3001a..30d5919238cfa5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -48,9 +48,7 @@ define <vscale x 1 x half> @vfmul_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -104,9 +102,7 @@ define <vscale x 1 x half> @vfmul_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -150,9 +146,7 @@ define <vscale x 2 x half> @vfmul_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -206,9 +200,7 @@ define <vscale x 2 x half> @vfmul_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -252,9 +244,7 @@ define <vscale x 4 x half> @vfmul_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -308,9 +298,7 @@ define <vscale x 4 x half> @vfmul_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -354,9 +342,7 @@ define <vscale x 8 x half> @vfmul_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -410,9 +396,7 @@ define <vscale x 8 x half> @vfmul_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -456,9 +440,7 @@ define <vscale x 16 x half> @vfmul_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -512,9 +494,7 @@ define <vscale x 16 x half> @vfmul_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -591,8 +571,6 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -600,6 +578,8 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -628,9 +608,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -696,8 +674,6 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -705,6 +681,8 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -726,9 +704,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -750,9 +726,7 @@ define <vscale x 1 x float> @vfmul_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -776,9 +750,7 @@ define <vscale x 1 x float> @vfmul_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -800,9 +772,7 @@ define <vscale x 2 x float> @vfmul_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -826,9 +796,7 @@ define <vscale x 2 x float> @vfmul_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -850,9 +818,7 @@ define <vscale x 4 x float> @vfmul_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -876,9 +842,7 @@ define <vscale x 4 x float> @vfmul_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -900,9 +864,7 @@ define <vscale x 8 x float> @vfmul_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -926,9 +888,7 @@ define <vscale x 8 x float> @vfmul_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -950,9 +910,7 @@ define <vscale x 16 x float> @vfmul_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -976,9 +934,7 @@ define <vscale x 16 x float> @vfmul_vf_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -1000,9 +956,7 @@ define <vscale x 1 x double> @vfmul_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1026,9 +980,7 @@ define <vscale x 1 x double> @vfmul_vf_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1050,9 +1002,7 @@ define <vscale x 2 x double> @vfmul_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1076,9 +1026,7 @@ define <vscale x 2 x double> @vfmul_vf_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1100,9 +1048,7 @@ define <vscale x 4 x double> @vfmul_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1126,9 +1072,7 @@ define <vscale x 4 x double> @vfmul_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1162,9 +1106,7 @@ define <vscale x 8 x double> @vfmul_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1188,8 +1130,6 @@ define <vscale x 8 x double> @vfmul_vf_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index 582043ffb903f6..292f27794f378a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -23,9 +23,7 @@ define <vscale x 1 x half> @vfma_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -61,9 +59,7 @@ define <vscale x 1 x half> @vfma_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -75,9 +71,7 @@ define <vscale x 1 x half> @vfma_vf_nxv1f16_unmasked_commute(<vscale x 1 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -100,9 +94,7 @@ define <vscale x 2 x half> @vfma_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -138,9 +130,7 @@ define <vscale x 2 x half> @vfma_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -152,9 +142,7 @@ define <vscale x 2 x half> @vfma_vf_nxv2f16_unmasked_commute(<vscale x 2 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -177,9 +165,7 @@ define <vscale x 4 x half> @vfma_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -215,9 +201,7 @@ define <vscale x 4 x half> @vfma_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -229,9 +213,7 @@ define <vscale x 4 x half> @vfma_vf_nxv4f16_unmasked_commute(<vscale x 4 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -254,9 +236,7 @@ define <vscale x 8 x half> @vfma_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -292,9 +272,7 @@ define <vscale x 8 x half> @vfma_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -306,9 +284,7 @@ define <vscale x 8 x half> @vfma_vf_nxv8f16_unmasked_commute(<vscale x 8 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -331,9 +307,7 @@ define <vscale x 16 x half> @vfma_vv_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -369,9 +343,7 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -383,9 +355,7 @@ define <vscale x 16 x half> @vfma_vf_nxv16f16_unmasked_commute(<vscale x 16 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -410,9 +380,7 @@ define <vscale x 32 x half> @vfma_vv_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -448,9 +416,7 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -462,9 +428,7 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16_unmasked_commute(<vscale x 32 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -487,9 +451,7 @@ define <vscale x 1 x float> @vfma_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -525,9 +487,7 @@ define <vscale x 1 x float> @vfma_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -539,9 +499,7 @@ define <vscale x 1 x float> @vfma_vf_nxv1f32_unmasked_commute(<vscale x 1 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -564,9 +522,7 @@ define <vscale x 2 x float> @vfma_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -602,9 +558,7 @@ define <vscale x 2 x float> @vfma_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -616,9 +570,7 @@ define <vscale x 2 x float> @vfma_vf_nxv2f32_unmasked_commute(<vscale x 2 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -641,9 +593,7 @@ define <vscale x 4 x float> @vfma_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -679,9 +629,7 @@ define <vscale x 4 x float> @vfma_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -693,9 +641,7 @@ define <vscale x 4 x float> @vfma_vf_nxv4f32_unmasked_commute(<vscale x 4 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -718,9 +664,7 @@ define <vscale x 8 x float> @vfma_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -756,9 +700,7 @@ define <vscale x 8 x float> @vfma_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -770,9 +712,7 @@ define <vscale x 8 x float> @vfma_vf_nxv8f32_unmasked_commute(<vscale x 8 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -797,9 +737,7 @@ define <vscale x 16 x float> @vfma_vv_nxv16f32_unmasked(<vscale x 16 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -835,9 +773,7 @@ define <vscale x 16 x float> @vfma_vf_nxv16f32_unmasked(<vscale x 16 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -849,9 +785,7 @@ define <vscale x 16 x float> @vfma_vf_nxv16f32_unmasked_commute(<vscale x 16 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -874,9 +808,7 @@ define <vscale x 1 x double> @vfma_vv_nxv1f64_unmasked(<vscale x 1 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -912,9 +844,7 @@ define <vscale x 1 x double> @vfma_vf_nxv1f64_unmasked(<vscale x 1 x double> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -926,9 +856,7 @@ define <vscale x 1 x double> @vfma_vf_nxv1f64_unmasked_commute(<vscale x 1 x dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -951,9 +879,7 @@ define <vscale x 2 x double> @vfma_vv_nxv2f64_unmasked(<vscale x 2 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -989,9 +915,7 @@ define <vscale x 2 x double> @vfma_vf_nxv2f64_unmasked(<vscale x 2 x double> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1003,9 +927,7 @@ define <vscale x 2 x double> @vfma_vf_nxv2f64_unmasked_commute(<vscale x 2 x dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1028,9 +950,7 @@ define <vscale x 4 x double> @vfma_vv_nxv4f64_unmasked(<vscale x 4 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1066,9 +986,7 @@ define <vscale x 4 x double> @vfma_vf_nxv4f64_unmasked(<vscale x 4 x double> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1080,9 +998,7 @@ define <vscale x 4 x double> @vfma_vf_nxv4f64_unmasked_commute(<vscale x 4 x dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1107,9 +1023,7 @@ define <vscale x 7 x double> @vfma_vv_nxv7f64_unmasked(<vscale x 7 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.fmuladd.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x double> %c, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.fmuladd.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x double> %c, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1134,9 +1048,7 @@ define <vscale x 8 x double> @vfma_vv_nxv8f64_unmasked(<vscale x 8 x double> %va
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1172,9 +1084,7 @@ define <vscale x 8 x double> @vfma_vf_nxv8f64_unmasked(<vscale x 8 x double> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1186,9 +1096,7 @@ define <vscale x 8 x double> @vfma_vf_nxv8f64_unmasked_commute(<vscale x 8 x dou
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1359,9 +1267,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.fmuladd.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.fmuladd.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %b, <vscale x 16 x double> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
 
@@ -1385,10 +1291,8 @@ define <vscale x 1 x half> @vfmsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1426,10 +1330,8 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1441,10 +1343,8 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16_unmasked_commute(<vscale x 1 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1479,11 +1379,9 @@ define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1493,11 +1391,9 @@ define <vscale x 1 x half> @vfnmadd_vv_nxv1f16_unmasked_commuted(<vscale x 1 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1537,11 +1433,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negva, <vscale x 1 x half> %vb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negva, <vscale x 1 x half> %vb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1553,11 +1447,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %negva, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %negva, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1597,11 +1489,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negvb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negvb, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1613,11 +1503,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negvb, <vscale x 1 x half> %va, <vscale x 1 x half> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negvb, <vscale x 1 x half> %va, <vscale x 1 x half> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1652,11 +1540,9 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1666,11 +1552,9 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16_unmasked_commuted(<vscale x 1 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negb, <vscale x 1 x half> %va, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1708,10 +1592,8 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negva, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negva, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1723,10 +1605,8 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_unmasked_commute(<vscale x 1 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %negva, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %negva, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1764,10 +1644,8 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negvb, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %negvb, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1779,10 +1657,8 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negvb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fmuladd.nxv1f16(<vscale x 1 x half> %negvb, <vscale x 1 x half> %va, <vscale x 1 x half> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -1806,10 +1682,8 @@ define <vscale x 2 x half> @vfmsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1847,10 +1721,8 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1862,10 +1734,8 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16_unmasked_commute(<vscale x 2 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1900,11 +1770,9 @@ define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1914,11 +1782,9 @@ define <vscale x 2 x half> @vfnmadd_vv_nxv2f16_unmasked_commuted(<vscale x 2 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1958,11 +1824,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negva, <vscale x 2 x half> %vb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negva, <vscale x 2 x half> %vb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -1974,11 +1838,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_unmasked_commute(<vscale x 2 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %negva, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %negva, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2018,11 +1880,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negvb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negvb, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2034,11 +1894,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negvb, <vscale x 2 x half> %va, <vscale x 2 x half> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negvb, <vscale x 2 x half> %va, <vscale x 2 x half> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2073,11 +1931,9 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2087,11 +1943,9 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16_unmasked_commuted(<vscale x 2 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negb, <vscale x 2 x half> %va, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2129,10 +1983,8 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negva, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negva, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2144,10 +1996,8 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_unmasked_commute(<vscale x 2 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %negva, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %negva, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2185,10 +2035,8 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negvb, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %negvb, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2200,10 +2048,8 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negvb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fmuladd.nxv2f16(<vscale x 2 x half> %negvb, <vscale x 2 x half> %va, <vscale x 2 x half> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -2227,10 +2073,8 @@ define <vscale x 4 x half> @vfmsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2268,10 +2112,8 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2283,10 +2125,8 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16_unmasked_commute(<vscale x 4 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2321,11 +2161,9 @@ define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2335,11 +2173,9 @@ define <vscale x 4 x half> @vfnmadd_vv_nxv4f16_unmasked_commuted(<vscale x 4 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2379,11 +2215,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negva, <vscale x 4 x half> %vb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negva, <vscale x 4 x half> %vb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2395,11 +2229,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_unmasked_commute(<vscale x 4 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %negva, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %negva, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2439,11 +2271,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negvb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negvb, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2455,11 +2285,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negvb, <vscale x 4 x half> %va, <vscale x 4 x half> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negvb, <vscale x 4 x half> %va, <vscale x 4 x half> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2494,11 +2322,9 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2508,11 +2334,9 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16_unmasked_commuted(<vscale x 4 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negb, <vscale x 4 x half> %va, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2550,10 +2374,8 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negva, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negva, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2565,10 +2387,8 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_unmasked_commute(<vscale x 4 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %negva, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %negva, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2606,10 +2426,8 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negvb, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %negvb, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2621,10 +2439,8 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negvb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fmuladd.nxv4f16(<vscale x 4 x half> %negvb, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -2648,10 +2464,8 @@ define <vscale x 8 x half> @vfmsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2689,10 +2503,8 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2704,10 +2516,8 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16_unmasked_commute(<vscale x 8 x hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2742,11 +2552,9 @@ define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2756,11 +2564,9 @@ define <vscale x 8 x half> @vfnmadd_vv_nxv8f16_unmasked_commuted(<vscale x 8 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2800,11 +2606,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negva, <vscale x 8 x half> %vb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negva, <vscale x 8 x half> %vb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2816,11 +2620,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_unmasked_commute(<vscale x 8 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %negva, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %negva, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2860,11 +2662,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negvb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negvb, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2876,11 +2676,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negvb, <vscale x 8 x half> %va, <vscale x 8 x half> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negvb, <vscale x 8 x half> %va, <vscale x 8 x half> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2915,11 +2713,9 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2929,11 +2725,9 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16_unmasked_commuted(<vscale x 8 x h
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negb, <vscale x 8 x half> %va, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2971,10 +2765,8 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negva, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negva, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -2986,10 +2778,8 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_unmasked_commute(<vscale x 8 x ha
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %negva, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %negva, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -3027,10 +2817,8 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negvb, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %negvb, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -3042,10 +2830,8 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute(<vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negvb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fmuladd.nxv8f16(<vscale x 8 x half> %negvb, <vscale x 8 x half> %va, <vscale x 8 x half> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -3069,10 +2855,8 @@ define <vscale x 16 x half> @vfmsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3110,10 +2894,8 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3125,10 +2907,8 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3163,11 +2943,9 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3177,11 +2955,9 @@ define <vscale x 16 x half> @vfnmadd_vv_nxv16f16_unmasked_commuted(<vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3221,11 +2997,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negva, <vscale x 16 x half> %vb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negva, <vscale x 16 x half> %vb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3237,11 +3011,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %negva, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %negva, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3281,11 +3053,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked(<vscale x 16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negvb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negvb, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3297,11 +3067,9 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negvb, <vscale x 16 x half> %va, <vscale x 16 x half> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negvb, <vscale x 16 x half> %va, <vscale x 16 x half> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3336,11 +3104,9 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3350,11 +3116,9 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16_unmasked_commuted(<vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negb, <vscale x 16 x half> %va, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3392,10 +3156,8 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negva, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negva, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3407,10 +3169,8 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %negva, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %negva, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3448,10 +3208,8 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked(<vscale x 16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negvb, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %negvb, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3463,10 +3221,8 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negvb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fmuladd.nxv16f16(<vscale x 16 x half> %negvb, <vscale x 16 x half> %va, <vscale x 16 x half> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -3492,10 +3248,8 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3533,10 +3287,8 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3548,10 +3300,8 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3589,11 +3339,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3604,11 +3352,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3648,11 +3394,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3664,11 +3408,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3708,11 +3450,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(<vscale x 32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3724,11 +3464,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %negvc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3766,11 +3504,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3781,11 +3517,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16_unmasked_commuted(<vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> %m, i32 %evl)
+  %negb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negb, <vscale x 32 x half> %va, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3823,10 +3557,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negva, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3838,10 +3570,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %negva = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %negva, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3879,10 +3609,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked(<vscale x 32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %negvb, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3894,10 +3622,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fmuladd.nxv32f16(<vscale x 32 x half> %negvb, <vscale x 32 x half> %va, <vscale x 32 x half> %vc, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -3921,10 +3647,8 @@ define <vscale x 1 x float> @vfmsub_vv_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -3962,10 +3686,8 @@ define <vscale x 1 x float> @vfmsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -3977,10 +3699,8 @@ define <vscale x 1 x float> @vfmsub_vf_nxv1f32_unmasked_commute(<vscale x 1 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4015,11 +3735,9 @@ define <vscale x 1 x float> @vfnmadd_vv_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4029,11 +3747,9 @@ define <vscale x 1 x float> @vfnmadd_vv_nxv1f32_unmasked_commuted(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negb, <vscale x 1 x float> %va, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negb, <vscale x 1 x float> %va, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4073,11 +3789,9 @@ define <vscale x 1 x float> @vfnmadd_vf_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negva, <vscale x 1 x float> %vb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negva, <vscale x 1 x float> %vb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4089,11 +3803,9 @@ define <vscale x 1 x float> @vfnmadd_vf_nxv1f32_unmasked_commute(<vscale x 1 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %negva, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %negva, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4133,11 +3845,9 @@ define <vscale x 1 x float> @vfnmadd_vf_nxv1f32_neg_splat_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negvb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negvb, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4149,11 +3859,9 @@ define <vscale x 1 x float> @vfnmadd_vf_nxv1f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negvb, <vscale x 1 x float> %va, <vscale x 1 x float> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negvb, <vscale x 1 x float> %va, <vscale x 1 x float> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4188,11 +3896,9 @@ define <vscale x 1 x float> @vfnmsub_vv_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4202,11 +3908,9 @@ define <vscale x 1 x float> @vfnmsub_vv_nxv1f32_unmasked_commuted(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negb, <vscale x 1 x float> %va, <vscale x 1 x float> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negb, <vscale x 1 x float> %va, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4244,10 +3948,8 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negva, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negva, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4259,10 +3961,8 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32_unmasked_commute(<vscale x 1 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %negva, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %negva, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4300,10 +4000,8 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32_neg_splat_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negvb, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %negvb, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4315,10 +4013,8 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negvb, <vscale x 1 x float> %va, <vscale x 1 x float> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fmuladd.nxv1f32(<vscale x 1 x float> %negvb, <vscale x 1 x float> %va, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -4342,10 +4038,8 @@ define <vscale x 2 x float> @vfmsub_vv_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4383,10 +4077,8 @@ define <vscale x 2 x float> @vfmsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4398,10 +4090,8 @@ define <vscale x 2 x float> @vfmsub_vf_nxv2f32_unmasked_commute(<vscale x 2 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4436,11 +4126,9 @@ define <vscale x 2 x float> @vfnmadd_vv_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4450,11 +4138,9 @@ define <vscale x 2 x float> @vfnmadd_vv_nxv2f32_unmasked_commuted(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negb, <vscale x 2 x float> %va, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negb, <vscale x 2 x float> %va, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4494,11 +4180,9 @@ define <vscale x 2 x float> @vfnmadd_vf_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negva, <vscale x 2 x float> %vb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negva, <vscale x 2 x float> %vb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4510,11 +4194,9 @@ define <vscale x 2 x float> @vfnmadd_vf_nxv2f32_unmasked_commute(<vscale x 2 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %negva, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %negva, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4554,11 +4236,9 @@ define <vscale x 2 x float> @vfnmadd_vf_nxv2f32_neg_splat_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negvb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negvb, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4570,11 +4250,9 @@ define <vscale x 2 x float> @vfnmadd_vf_nxv2f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negvb, <vscale x 2 x float> %va, <vscale x 2 x float> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negvb, <vscale x 2 x float> %va, <vscale x 2 x float> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4609,11 +4287,9 @@ define <vscale x 2 x float> @vfnmsub_vv_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4623,11 +4299,9 @@ define <vscale x 2 x float> @vfnmsub_vv_nxv2f32_unmasked_commuted(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negb, <vscale x 2 x float> %va, <vscale x 2 x float> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negb, <vscale x 2 x float> %va, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4665,10 +4339,8 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negva, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negva, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4680,10 +4352,8 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32_unmasked_commute(<vscale x 2 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %negva, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %negva, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4721,10 +4391,8 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32_neg_splat_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negvb, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %negvb, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4736,10 +4404,8 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negvb, <vscale x 2 x float> %va, <vscale x 2 x float> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fmuladd.nxv2f32(<vscale x 2 x float> %negvb, <vscale x 2 x float> %va, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -4763,10 +4429,8 @@ define <vscale x 4 x float> @vfmsub_vv_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4804,10 +4468,8 @@ define <vscale x 4 x float> @vfmsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4819,10 +4481,8 @@ define <vscale x 4 x float> @vfmsub_vf_nxv4f32_unmasked_commute(<vscale x 4 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4857,11 +4517,9 @@ define <vscale x 4 x float> @vfnmadd_vv_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4871,11 +4529,9 @@ define <vscale x 4 x float> @vfnmadd_vv_nxv4f32_unmasked_commuted(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negb, <vscale x 4 x float> %va, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negb, <vscale x 4 x float> %va, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4915,11 +4571,9 @@ define <vscale x 4 x float> @vfnmadd_vf_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negva, <vscale x 4 x float> %vb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negva, <vscale x 4 x float> %vb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4931,11 +4585,9 @@ define <vscale x 4 x float> @vfnmadd_vf_nxv4f32_unmasked_commute(<vscale x 4 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %negva, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %negva, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4975,11 +4627,9 @@ define <vscale x 4 x float> @vfnmadd_vf_nxv4f32_neg_splat_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negvb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negvb, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -4991,11 +4641,9 @@ define <vscale x 4 x float> @vfnmadd_vf_nxv4f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negvb, <vscale x 4 x float> %va, <vscale x 4 x float> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negvb, <vscale x 4 x float> %va, <vscale x 4 x float> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5030,11 +4678,9 @@ define <vscale x 4 x float> @vfnmsub_vv_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5044,11 +4690,9 @@ define <vscale x 4 x float> @vfnmsub_vv_nxv4f32_unmasked_commuted(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negb, <vscale x 4 x float> %va, <vscale x 4 x float> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negb, <vscale x 4 x float> %va, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5086,10 +4730,8 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negva, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negva, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5101,10 +4743,8 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32_unmasked_commute(<vscale x 4 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %negva, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %negva, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5142,10 +4782,8 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32_neg_splat_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negvb, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %negvb, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5157,10 +4795,8 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negvb, <vscale x 4 x float> %va, <vscale x 4 x float> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32(<vscale x 4 x float> %negvb, <vscale x 4 x float> %va, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -5184,10 +4820,8 @@ define <vscale x 8 x float> @vfmsub_vv_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5225,10 +4859,8 @@ define <vscale x 8 x float> @vfmsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5240,10 +4872,8 @@ define <vscale x 8 x float> @vfmsub_vf_nxv8f32_unmasked_commute(<vscale x 8 x fl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5278,11 +4908,9 @@ define <vscale x 8 x float> @vfnmadd_vv_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5292,11 +4920,9 @@ define <vscale x 8 x float> @vfnmadd_vv_nxv8f32_unmasked_commuted(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negb, <vscale x 8 x float> %va, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negb, <vscale x 8 x float> %va, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5336,11 +4962,9 @@ define <vscale x 8 x float> @vfnmadd_vf_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negva, <vscale x 8 x float> %vb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negva, <vscale x 8 x float> %vb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5352,11 +4976,9 @@ define <vscale x 8 x float> @vfnmadd_vf_nxv8f32_unmasked_commute(<vscale x 8 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %negva, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %negva, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5396,11 +5018,9 @@ define <vscale x 8 x float> @vfnmadd_vf_nxv8f32_neg_splat_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negvb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negvb, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5412,11 +5032,9 @@ define <vscale x 8 x float> @vfnmadd_vf_nxv8f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negvb, <vscale x 8 x float> %va, <vscale x 8 x float> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negvb, <vscale x 8 x float> %va, <vscale x 8 x float> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5451,11 +5069,9 @@ define <vscale x 8 x float> @vfnmsub_vv_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5465,11 +5081,9 @@ define <vscale x 8 x float> @vfnmsub_vv_nxv8f32_unmasked_commuted(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negb, <vscale x 8 x float> %va, <vscale x 8 x float> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negb, <vscale x 8 x float> %va, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5507,10 +5121,8 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negva, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negva, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5522,10 +5134,8 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32_unmasked_commute(<vscale x 8 x f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %negva, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %negva, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5563,10 +5173,8 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32_neg_splat_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negvb, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %negvb, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5578,10 +5186,8 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute(<vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negvb, <vscale x 8 x float> %va, <vscale x 8 x float> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fmuladd.nxv8f32(<vscale x 8 x float> %negvb, <vscale x 8 x float> %va, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -5607,10 +5213,8 @@ define <vscale x 16 x float> @vfmsub_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5648,10 +5252,8 @@ define <vscale x 16 x float> @vfmsub_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5663,10 +5265,8 @@ define <vscale x 16 x float> @vfmsub_vf_nxv16f32_unmasked_commute(<vscale x 16 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5704,11 +5304,9 @@ define <vscale x 16 x float> @vfnmadd_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5719,11 +5317,9 @@ define <vscale x 16 x float> @vfnmadd_vv_nxv16f32_unmasked_commuted(<vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negb, <vscale x 16 x float> %va, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negb, <vscale x 16 x float> %va, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5763,11 +5359,9 @@ define <vscale x 16 x float> @vfnmadd_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negva, <vscale x 16 x float> %vb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negva, <vscale x 16 x float> %vb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5779,11 +5373,9 @@ define <vscale x 16 x float> @vfnmadd_vf_nxv16f32_unmasked_commute(<vscale x 16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %negva, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %negva, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5823,11 +5415,9 @@ define <vscale x 16 x float> @vfnmadd_vf_nxv16f32_neg_splat_unmasked(<vscale x 1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negvb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negvb, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5839,11 +5429,9 @@ define <vscale x 16 x float> @vfnmadd_vf_nxv16f32_neg_splat_unmasked_commute(<vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negvb, <vscale x 16 x float> %va, <vscale x 16 x float> %negvc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negvb, <vscale x 16 x float> %va, <vscale x 16 x float> %negvc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5881,11 +5469,9 @@ define <vscale x 16 x float> @vfnmsub_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5896,11 +5482,9 @@ define <vscale x 16 x float> @vfnmsub_vv_nxv16f32_unmasked_commuted(<vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negb, <vscale x 16 x float> %va, <vscale x 16 x float> %negc, <vscale x 16 x i1> %m, i32 %evl)
+  %negb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negb, <vscale x 16 x float> %va, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5938,10 +5522,8 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negva, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negva, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5953,10 +5535,8 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32_unmasked_commute(<vscale x 16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %negva, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negva = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %negva, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -5994,10 +5574,8 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32_neg_splat_unmasked(<vscale x 1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negvb, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %negvb, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -6009,10 +5587,8 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute(<vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negvb, <vscale x 16 x float> %va, <vscale x 16 x float> %vc, <vscale x 16 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fmuladd.nxv16f32(<vscale x 16 x float> %negvb, <vscale x 16 x float> %va, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -6036,10 +5612,8 @@ define <vscale x 1 x double> @vfmsub_vv_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6077,10 +5651,8 @@ define <vscale x 1 x double> @vfmsub_vf_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6092,10 +5664,8 @@ define <vscale x 1 x double> @vfmsub_vf_nxv1f64_unmasked_commute(<vscale x 1 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6130,11 +5700,9 @@ define <vscale x 1 x double> @vfnmadd_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6144,11 +5712,9 @@ define <vscale x 1 x double> @vfnmadd_vv_nxv1f64_unmasked_commuted(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negb, <vscale x 1 x double> %va, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negb, <vscale x 1 x double> %va, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6188,11 +5754,9 @@ define <vscale x 1 x double> @vfnmadd_vf_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negva, <vscale x 1 x double> %vb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negva, <vscale x 1 x double> %vb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6204,11 +5768,9 @@ define <vscale x 1 x double> @vfnmadd_vf_nxv1f64_unmasked_commute(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %negva, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %negva, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6248,11 +5810,9 @@ define <vscale x 1 x double> @vfnmadd_vf_nxv1f64_neg_splat_unmasked(<vscale x 1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negvb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negvb, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6264,11 +5824,9 @@ define <vscale x 1 x double> @vfnmadd_vf_nxv1f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negvb, <vscale x 1 x double> %va, <vscale x 1 x double> %negvc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negvb, <vscale x 1 x double> %va, <vscale x 1 x double> %negvc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6303,11 +5861,9 @@ define <vscale x 1 x double> @vfnmsub_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6317,11 +5873,9 @@ define <vscale x 1 x double> @vfnmsub_vv_nxv1f64_unmasked_commuted(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negb, <vscale x 1 x double> %va, <vscale x 1 x double> %negc, <vscale x 1 x i1> %m, i32 %evl)
+  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negb, <vscale x 1 x double> %va, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6359,10 +5913,8 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negva, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negva, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6374,10 +5926,8 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64_unmasked_commute(<vscale x 1 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %negva, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negva = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %negva, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6415,10 +5965,8 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64_neg_splat_unmasked(<vscale x 1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negvb, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %negvb, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6430,10 +5978,8 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negvb, <vscale x 1 x double> %va, <vscale x 1 x double> %vc, <vscale x 1 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fmuladd.nxv1f64(<vscale x 1 x double> %negvb, <vscale x 1 x double> %va, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -6457,10 +6003,8 @@ define <vscale x 2 x double> @vfmsub_vv_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6498,10 +6042,8 @@ define <vscale x 2 x double> @vfmsub_vf_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6513,10 +6055,8 @@ define <vscale x 2 x double> @vfmsub_vf_nxv2f64_unmasked_commute(<vscale x 2 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6551,11 +6091,9 @@ define <vscale x 2 x double> @vfnmadd_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6565,11 +6103,9 @@ define <vscale x 2 x double> @vfnmadd_vv_nxv2f64_unmasked_commuted(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negb, <vscale x 2 x double> %va, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negb, <vscale x 2 x double> %va, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6609,11 +6145,9 @@ define <vscale x 2 x double> @vfnmadd_vf_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negva, <vscale x 2 x double> %vb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negva, <vscale x 2 x double> %vb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6625,11 +6159,9 @@ define <vscale x 2 x double> @vfnmadd_vf_nxv2f64_unmasked_commute(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %negva, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %negva, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6669,11 +6201,9 @@ define <vscale x 2 x double> @vfnmadd_vf_nxv2f64_neg_splat_unmasked(<vscale x 2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negvb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negvb, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6685,11 +6215,9 @@ define <vscale x 2 x double> @vfnmadd_vf_nxv2f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negvb, <vscale x 2 x double> %va, <vscale x 2 x double> %negvc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negvb, <vscale x 2 x double> %va, <vscale x 2 x double> %negvc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6724,11 +6252,9 @@ define <vscale x 2 x double> @vfnmsub_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6738,11 +6264,9 @@ define <vscale x 2 x double> @vfnmsub_vv_nxv2f64_unmasked_commuted(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negb, <vscale x 2 x double> %va, <vscale x 2 x double> %negc, <vscale x 2 x i1> %m, i32 %evl)
+  %negb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negb, <vscale x 2 x double> %va, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6780,10 +6304,8 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negva, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negva, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6795,10 +6317,8 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64_unmasked_commute(<vscale x 2 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %negva, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negva = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %negva, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6836,10 +6356,8 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64_neg_splat_unmasked(<vscale x 2
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negvb, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %negvb, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6851,10 +6369,8 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negvb, <vscale x 2 x double> %va, <vscale x 2 x double> %vc, <vscale x 2 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fmuladd.nxv2f64(<vscale x 2 x double> %negvb, <vscale x 2 x double> %va, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -6878,10 +6394,8 @@ define <vscale x 4 x double> @vfmsub_vv_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -6919,10 +6433,8 @@ define <vscale x 4 x double> @vfmsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -6934,10 +6446,8 @@ define <vscale x 4 x double> @vfmsub_vf_nxv4f64_unmasked_commute(<vscale x 4 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -6972,11 +6482,9 @@ define <vscale x 4 x double> @vfnmadd_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -6986,11 +6494,9 @@ define <vscale x 4 x double> @vfnmadd_vv_nxv4f64_unmasked_commuted(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negb, <vscale x 4 x double> %va, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negb, <vscale x 4 x double> %va, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7030,11 +6536,9 @@ define <vscale x 4 x double> @vfnmadd_vf_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negva, <vscale x 4 x double> %vb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negva, <vscale x 4 x double> %vb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7046,11 +6550,9 @@ define <vscale x 4 x double> @vfnmadd_vf_nxv4f64_unmasked_commute(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %negva, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %negva, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7090,11 +6592,9 @@ define <vscale x 4 x double> @vfnmadd_vf_nxv4f64_neg_splat_unmasked(<vscale x 4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negvb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negvb, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7106,11 +6606,9 @@ define <vscale x 4 x double> @vfnmadd_vf_nxv4f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negvb, <vscale x 4 x double> %va, <vscale x 4 x double> %negvc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negvb, <vscale x 4 x double> %va, <vscale x 4 x double> %negvc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7145,11 +6643,9 @@ define <vscale x 4 x double> @vfnmsub_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7159,11 +6655,9 @@ define <vscale x 4 x double> @vfnmsub_vv_nxv4f64_unmasked_commuted(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negb, <vscale x 4 x double> %va, <vscale x 4 x double> %negc, <vscale x 4 x i1> %m, i32 %evl)
+  %negb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negb, <vscale x 4 x double> %va, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7201,10 +6695,8 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negva, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negva, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7216,10 +6708,8 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64_unmasked_commute(<vscale x 4 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %negva, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negva = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %negva, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7257,10 +6747,8 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64_neg_splat_unmasked(<vscale x 4
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negvb, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %negvb, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7272,10 +6760,8 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negvb, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, <vscale x 4 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fmuladd.nxv4f64(<vscale x 4 x double> %negvb, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -7301,10 +6787,8 @@ define <vscale x 8 x double> @vfmsub_vv_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmsub.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7342,10 +6826,8 @@ define <vscale x 8 x double> @vfmsub_vf_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7357,10 +6839,8 @@ define <vscale x 8 x double> @vfmsub_vf_nxv8f64_unmasked_commute(<vscale x 8 x d
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7398,11 +6878,9 @@ define <vscale x 8 x double> @vfnmadd_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7413,11 +6891,9 @@ define <vscale x 8 x double> @vfnmadd_vv_nxv8f64_unmasked_commuted(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negb, <vscale x 8 x double> %va, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negb, <vscale x 8 x double> %va, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7457,11 +6933,9 @@ define <vscale x 8 x double> @vfnmadd_vf_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negva, <vscale x 8 x double> %vb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negva, <vscale x 8 x double> %vb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7473,11 +6947,9 @@ define <vscale x 8 x double> @vfnmadd_vf_nxv8f64_unmasked_commute(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %negva, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %negva, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7517,11 +6989,9 @@ define <vscale x 8 x double> @vfnmadd_vf_nxv8f64_neg_splat_unmasked(<vscale x 8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negvb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negvb, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7533,11 +7003,9 @@ define <vscale x 8 x double> @vfnmadd_vf_nxv8f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negvb, <vscale x 8 x double> %va, <vscale x 8 x double> %negvc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negvc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negvb, <vscale x 8 x double> %va, <vscale x 8 x double> %negvc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7575,11 +7043,9 @@ define <vscale x 8 x double> @vfnmsub_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7590,11 +7056,9 @@ define <vscale x 8 x double> @vfnmsub_vv_nxv8f64_unmasked_commuted(<vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfnmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negb, <vscale x 8 x double> %va, <vscale x 8 x double> %negc, <vscale x 8 x i1> %m, i32 %evl)
+  %negb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negb, <vscale x 8 x double> %va, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7632,10 +7096,8 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negva, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negva, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7647,10 +7109,8 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_unmasked_commute(<vscale x 8 x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %negva, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negva = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %negva, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7688,10 +7148,8 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_neg_splat_unmasked(<vscale x 8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negvb, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %negvb, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -7703,9 +7161,7 @@ define <vscale x 8 x double> @vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute(<vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negvb, <vscale x 8 x double> %va, <vscale x 8 x double> %vc, <vscale x 8 x i1> %m, i32 %evl)
+  %negvb = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fmuladd.nxv8f64(<vscale x 8 x double> %negvb, <vscale x 8 x double> %va, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index ef08865100f14b..1db5fa1720a276 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -46,9 +46,7 @@ define <vscale x 1 x half> @vfneg_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -90,9 +88,7 @@ define <vscale x 2 x half> @vfneg_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -134,9 +130,7 @@ define <vscale x 4 x half> @vfneg_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -178,9 +172,7 @@ define <vscale x 8 x half> @vfneg_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -222,9 +214,7 @@ define <vscale x 16 x half> @vfneg_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -279,8 +269,6 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -288,6 +276,8 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -306,9 +296,7 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -330,9 +318,7 @@ define <vscale x 1 x float> @vfneg_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -354,9 +340,7 @@ define <vscale x 2 x float> @vfneg_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -378,9 +362,7 @@ define <vscale x 4 x float> @vfneg_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -402,9 +384,7 @@ define <vscale x 8 x float> @vfneg_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -426,9 +406,7 @@ define <vscale x 16 x float> @vfneg_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -450,9 +428,7 @@ define <vscale x 1 x double> @vfneg_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -474,9 +450,7 @@ define <vscale x 2 x double> @vfneg_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -498,9 +472,7 @@ define <vscale x 4 x double> @vfneg_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -522,9 +494,7 @@ define <vscale x 7 x double> @vfneg_vv_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -546,9 +516,7 @@ define <vscale x 8 x double> @vfneg_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -598,8 +566,6 @@ define <vscale x 16 x double> @vfneg_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll
index e642e89b3dff05..ee3ed603ff6d79 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll
@@ -16,11 +16,9 @@ define <vscale x 1 x half> @vfnmacc_vv_nxv1f16(<vscale x 1 x half> %a, <vscale x
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -32,12 +30,10 @@ define <vscale x 1 x half> @vfnmacc_vv_nxv1f16_unmasked(<vscale x 1 x half> %a,
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %allones, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
 
@@ -50,11 +46,9 @@ define <vscale x 1 x half> @vfnmacc_vf_nxv1f16(<vscale x 1 x half> %a, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -68,11 +62,9 @@ define <vscale x 1 x half> @vfnmacc_vf_nxv1f16_commute(<vscale x 1 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %nega, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %nega, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -86,12 +78,10 @@ define <vscale x 1 x half> @vfnmacc_vf_nxv1f16_unmasked(<vscale x 1 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %allones, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
 
@@ -102,11 +92,9 @@ define <vscale x 1 x half> @vfnmacc_vv_nxv1f16_ta(<vscale x 1 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -120,11 +108,9 @@ define <vscale x 1 x half> @vfnmacc_vf_nxv1f16_ta(<vscale x 1 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -138,11 +124,9 @@ define <vscale x 1 x half> @vfnmacc_vf_nxv1f16_commute_ta(<vscale x 1 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %nega, <vscale x 1 x half> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %nega, <vscale x 1 x half> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -159,11 +143,9 @@ define <vscale x 2 x half> @vfnmacc_vv_nxv2f16(<vscale x 2 x half> %a, <vscale x
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -175,12 +157,10 @@ define <vscale x 2 x half> @vfnmacc_vv_nxv2f16_unmasked(<vscale x 2 x half> %a,
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %allones, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
 
@@ -193,11 +173,9 @@ define <vscale x 2 x half> @vfnmacc_vf_nxv2f16(<vscale x 2 x half> %a, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -211,11 +189,9 @@ define <vscale x 2 x half> @vfnmacc_vf_nxv2f16_commute(<vscale x 2 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %nega, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %nega, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -229,12 +205,10 @@ define <vscale x 2 x half> @vfnmacc_vf_nxv2f16_unmasked(<vscale x 2 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %allones, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
 
@@ -245,11 +219,9 @@ define <vscale x 2 x half> @vfnmacc_vv_nxv2f16_ta(<vscale x 2 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -263,11 +235,9 @@ define <vscale x 2 x half> @vfnmacc_vf_nxv2f16_ta(<vscale x 2 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -281,11 +251,9 @@ define <vscale x 2 x half> @vfnmacc_vf_nxv2f16_commute_ta(<vscale x 2 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %nega, <vscale x 2 x half> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %nega, <vscale x 2 x half> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -302,11 +270,9 @@ define <vscale x 4 x half> @vfnmacc_vv_nxv4f16(<vscale x 4 x half> %a, <vscale x
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -318,12 +284,10 @@ define <vscale x 4 x half> @vfnmacc_vv_nxv4f16_unmasked(<vscale x 4 x half> %a,
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %allones, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
 
@@ -336,11 +300,9 @@ define <vscale x 4 x half> @vfnmacc_vf_nxv4f16(<vscale x 4 x half> %a, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -354,11 +316,9 @@ define <vscale x 4 x half> @vfnmacc_vf_nxv4f16_commute(<vscale x 4 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %nega, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %nega, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -372,12 +332,10 @@ define <vscale x 4 x half> @vfnmacc_vf_nxv4f16_unmasked(<vscale x 4 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %allones, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
 
@@ -388,11 +346,9 @@ define <vscale x 4 x half> @vfnmacc_vv_nxv4f16_ta(<vscale x 4 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -406,11 +362,9 @@ define <vscale x 4 x half> @vfnmacc_vf_nxv4f16_ta(<vscale x 4 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -424,11 +378,9 @@ define <vscale x 4 x half> @vfnmacc_vf_nxv4f16_commute_ta(<vscale x 4 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %nega, <vscale x 4 x half> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %nega, <vscale x 4 x half> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -445,11 +397,9 @@ define <vscale x 8 x half> @vfnmacc_vv_nxv8f16(<vscale x 8 x half> %a, <vscale x
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -461,12 +411,10 @@ define <vscale x 8 x half> @vfnmacc_vv_nxv8f16_unmasked(<vscale x 8 x half> %a,
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %allones, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
 
@@ -479,11 +427,9 @@ define <vscale x 8 x half> @vfnmacc_vf_nxv8f16(<vscale x 8 x half> %a, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -497,11 +443,9 @@ define <vscale x 8 x half> @vfnmacc_vf_nxv8f16_commute(<vscale x 8 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %nega, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %nega, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -515,12 +459,10 @@ define <vscale x 8 x half> @vfnmacc_vf_nxv8f16_unmasked(<vscale x 8 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %allones, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
 
@@ -531,11 +473,9 @@ define <vscale x 8 x half> @vfnmacc_vv_nxv8f16_ta(<vscale x 8 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -549,11 +489,9 @@ define <vscale x 8 x half> @vfnmacc_vf_nxv8f16_ta(<vscale x 8 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -567,11 +505,9 @@ define <vscale x 8 x half> @vfnmacc_vf_nxv8f16_commute_ta(<vscale x 8 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %nega, <vscale x 8 x half> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %nega, <vscale x 8 x half> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -588,11 +524,9 @@ define <vscale x 16 x half> @vfnmacc_vv_nxv16f16(<vscale x 16 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -604,12 +538,10 @@ define <vscale x 16 x half> @vfnmacc_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %allones, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
 
@@ -622,11 +554,9 @@ define <vscale x 16 x half> @vfnmacc_vf_nxv16f16(<vscale x 16 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -640,11 +570,9 @@ define <vscale x 16 x half> @vfnmacc_vf_nxv16f16_commute(<vscale x 16 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %nega, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %nega, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -658,12 +586,10 @@ define <vscale x 16 x half> @vfnmacc_vf_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %allones, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
 
@@ -674,11 +600,9 @@ define <vscale x 16 x half> @vfnmacc_vv_nxv16f16_ta(<vscale x 16 x half> %a, <vs
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -692,11 +616,9 @@ define <vscale x 16 x half> @vfnmacc_vf_nxv16f16_ta(<vscale x 16 x half> %a, hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -710,11 +632,9 @@ define <vscale x 16 x half> @vfnmacc_vf_nxv16f16_commute_ta(<vscale x 16 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %nega, <vscale x 16 x half> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %nega, <vscale x 16 x half> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -732,11 +652,9 @@ define <vscale x 32 x half> @vfnmacc_vv_nxv32f16(<vscale x 32 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -749,12 +667,10 @@ define <vscale x 32 x half> @vfnmacc_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %allones, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
 
@@ -767,11 +683,9 @@ define <vscale x 32 x half> @vfnmacc_vf_nxv32f16(<vscale x 32 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -785,11 +699,9 @@ define <vscale x 32 x half> @vfnmacc_vf_nxv32f16_commute(<vscale x 32 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %nega, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %nega, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -803,12 +715,10 @@ define <vscale x 32 x half> @vfnmacc_vf_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %allones, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
 
@@ -820,11 +730,9 @@ define <vscale x 32 x half> @vfnmacc_vv_nxv32f16_ta(<vscale x 32 x half> %a, <vs
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -838,11 +746,9 @@ define <vscale x 32 x half> @vfnmacc_vf_nxv32f16_ta(<vscale x 32 x half> %a, hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -856,11 +762,9 @@ define <vscale x 32 x half> @vfnmacc_vf_nxv32f16_commute_ta(<vscale x 32 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %nega, <vscale x 32 x half> %negc, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %nega, <vscale x 32 x half> %negc, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -877,11 +781,9 @@ define <vscale x 1 x float> @vfnmacc_vv_nxv1f32(<vscale x 1 x float> %a, <vscale
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -893,12 +795,10 @@ define <vscale x 1 x float> @vfnmacc_vv_nxv1f32_unmasked(<vscale x 1 x float> %a
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -911,11 +811,9 @@ define <vscale x 1 x float> @vfnmacc_vf_nxv1f32(<vscale x 1 x float> %a, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -929,11 +827,9 @@ define <vscale x 1 x float> @vfnmacc_vf_nxv1f32_commute(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %nega, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %nega, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -947,12 +843,10 @@ define <vscale x 1 x float> @vfnmacc_vf_nxv1f32_unmasked(<vscale x 1 x float> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -963,11 +857,9 @@ define <vscale x 1 x float> @vfnmacc_vv_nxv1f32_ta(<vscale x 1 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -981,11 +873,9 @@ define <vscale x 1 x float> @vfnmacc_vf_nxv1f32_ta(<vscale x 1 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -999,11 +889,9 @@ define <vscale x 1 x float> @vfnmacc_vf_nxv1f32_commute_ta(<vscale x 1 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %nega, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %nega, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -1020,11 +908,9 @@ define <vscale x 2 x float> @vfnmacc_vv_nxv2f32(<vscale x 2 x float> %a, <vscale
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1036,12 +922,10 @@ define <vscale x 2 x float> @vfnmacc_vv_nxv2f32_unmasked(<vscale x 2 x float> %a
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %allones, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
 
@@ -1054,11 +938,9 @@ define <vscale x 2 x float> @vfnmacc_vf_nxv2f32(<vscale x 2 x float> %a, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1072,11 +954,9 @@ define <vscale x 2 x float> @vfnmacc_vf_nxv2f32_commute(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %nega, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %nega, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1090,12 +970,10 @@ define <vscale x 2 x float> @vfnmacc_vf_nxv2f32_unmasked(<vscale x 2 x float> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %allones, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
 
@@ -1106,11 +984,9 @@ define <vscale x 2 x float> @vfnmacc_vv_nxv2f32_ta(<vscale x 2 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1124,11 +1000,9 @@ define <vscale x 2 x float> @vfnmacc_vf_nxv2f32_ta(<vscale x 2 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1142,11 +1016,9 @@ define <vscale x 2 x float> @vfnmacc_vf_nxv2f32_commute_ta(<vscale x 2 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %nega, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %nega, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1163,11 +1035,9 @@ define <vscale x 4 x float> @vfnmacc_vv_nxv4f32(<vscale x 4 x float> %a, <vscale
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1179,12 +1049,10 @@ define <vscale x 4 x float> @vfnmacc_vv_nxv4f32_unmasked(<vscale x 4 x float> %a
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %allones, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
 
@@ -1197,11 +1065,9 @@ define <vscale x 4 x float> @vfnmacc_vf_nxv4f32(<vscale x 4 x float> %a, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1215,11 +1081,9 @@ define <vscale x 4 x float> @vfnmacc_vf_nxv4f32_commute(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %nega, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %nega, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1233,12 +1097,10 @@ define <vscale x 4 x float> @vfnmacc_vf_nxv4f32_unmasked(<vscale x 4 x float> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %allones, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
 
@@ -1249,11 +1111,9 @@ define <vscale x 4 x float> @vfnmacc_vv_nxv4f32_ta(<vscale x 4 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1267,11 +1127,9 @@ define <vscale x 4 x float> @vfnmacc_vf_nxv4f32_ta(<vscale x 4 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1285,11 +1143,9 @@ define <vscale x 4 x float> @vfnmacc_vf_nxv4f32_commute_ta(<vscale x 4 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %nega, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %nega, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1306,11 +1162,9 @@ define <vscale x 8 x float> @vfnmacc_vv_nxv8f32(<vscale x 8 x float> %a, <vscale
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1322,12 +1176,10 @@ define <vscale x 8 x float> @vfnmacc_vv_nxv8f32_unmasked(<vscale x 8 x float> %a
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %allones, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
 
@@ -1340,11 +1192,9 @@ define <vscale x 8 x float> @vfnmacc_vf_nxv8f32(<vscale x 8 x float> %a, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1358,11 +1208,9 @@ define <vscale x 8 x float> @vfnmacc_vf_nxv8f32_commute(<vscale x 8 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %nega, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %nega, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1376,12 +1224,10 @@ define <vscale x 8 x float> @vfnmacc_vf_nxv8f32_unmasked(<vscale x 8 x float> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %allones, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
 
@@ -1392,11 +1238,9 @@ define <vscale x 8 x float> @vfnmacc_vv_nxv8f32_ta(<vscale x 8 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1410,11 +1254,9 @@ define <vscale x 8 x float> @vfnmacc_vf_nxv8f32_ta(<vscale x 8 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1428,11 +1270,9 @@ define <vscale x 8 x float> @vfnmacc_vf_nxv8f32_commute_ta(<vscale x 8 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %nega, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %nega, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1450,11 +1290,9 @@ define <vscale x 16 x float> @vfnmacc_vv_nxv16f32(<vscale x 16 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1467,12 +1305,10 @@ define <vscale x 16 x float> @vfnmacc_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %allones, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
 
@@ -1485,11 +1321,9 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32(<vscale x 16 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1503,11 +1337,9 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32_commute(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %nega, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %nega, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1521,12 +1353,10 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %allones, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
 
@@ -1538,11 +1368,9 @@ define <vscale x 16 x float> @vfnmacc_vv_nxv16f32_ta(<vscale x 16 x float> %a, <
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1556,11 +1384,9 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32_ta(<vscale x 16 x float> %a, f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1574,11 +1400,9 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32_commute_ta(<vscale x 16 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %nega, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %nega, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1595,11 +1419,9 @@ define <vscale x 1 x double> @vfnmacc_vv_nxv1f64(<vscale x 1 x double> %a, <vsca
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1611,12 +1433,10 @@ define <vscale x 1 x double> @vfnmacc_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %allones, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
 
@@ -1629,11 +1449,9 @@ define <vscale x 1 x double> @vfnmacc_vf_nxv1f64(<vscale x 1 x double> %a, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1647,11 +1465,9 @@ define <vscale x 1 x double> @vfnmacc_vf_nxv1f64_commute(<vscale x 1 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %nega, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %nega, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1665,12 +1481,10 @@ define <vscale x 1 x double> @vfnmacc_vf_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %allones, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
 
@@ -1681,11 +1495,9 @@ define <vscale x 1 x double> @vfnmacc_vv_nxv1f64_ta(<vscale x 1 x double> %a, <v
 ; CHECK-NEXT:    vfnmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1699,11 +1511,9 @@ define <vscale x 1 x double> @vfnmacc_vf_nxv1f64_ta(<vscale x 1 x double> %a, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1717,11 +1527,9 @@ define <vscale x 1 x double> @vfnmacc_vf_nxv1f64_commute_ta(<vscale x 1 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %nega, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %nega, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1738,11 +1546,9 @@ define <vscale x 2 x double> @vfnmacc_vv_nxv2f64(<vscale x 2 x double> %a, <vsca
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1754,12 +1560,10 @@ define <vscale x 2 x double> @vfnmacc_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %allones, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
 
@@ -1772,11 +1576,9 @@ define <vscale x 2 x double> @vfnmacc_vf_nxv2f64(<vscale x 2 x double> %a, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1790,11 +1592,9 @@ define <vscale x 2 x double> @vfnmacc_vf_nxv2f64_commute(<vscale x 2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %nega, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %nega, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1808,12 +1608,10 @@ define <vscale x 2 x double> @vfnmacc_vf_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %allones, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
 
@@ -1824,11 +1622,9 @@ define <vscale x 2 x double> @vfnmacc_vv_nxv2f64_ta(<vscale x 2 x double> %a, <v
 ; CHECK-NEXT:    vfnmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1842,11 +1638,9 @@ define <vscale x 2 x double> @vfnmacc_vf_nxv2f64_ta(<vscale x 2 x double> %a, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1860,11 +1654,9 @@ define <vscale x 2 x double> @vfnmacc_vf_nxv2f64_commute_ta(<vscale x 2 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %nega, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %nega, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1881,11 +1673,9 @@ define <vscale x 4 x double> @vfnmacc_vv_nxv4f64(<vscale x 4 x double> %a, <vsca
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1897,12 +1687,10 @@ define <vscale x 4 x double> @vfnmacc_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %allones, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
 
@@ -1915,11 +1703,9 @@ define <vscale x 4 x double> @vfnmacc_vf_nxv4f64(<vscale x 4 x double> %a, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1933,11 +1719,9 @@ define <vscale x 4 x double> @vfnmacc_vf_nxv4f64_commute(<vscale x 4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %nega, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %nega, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1951,12 +1735,10 @@ define <vscale x 4 x double> @vfnmacc_vf_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %allones, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
 
@@ -1967,11 +1749,9 @@ define <vscale x 4 x double> @vfnmacc_vv_nxv4f64_ta(<vscale x 4 x double> %a, <v
 ; CHECK-NEXT:    vfnmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1985,11 +1765,9 @@ define <vscale x 4 x double> @vfnmacc_vf_nxv4f64_ta(<vscale x 4 x double> %a, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -2003,11 +1781,9 @@ define <vscale x 4 x double> @vfnmacc_vf_nxv4f64_commute_ta(<vscale x 4 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %nega, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %nega, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -2025,11 +1801,9 @@ define <vscale x 8 x double> @vfnmacc_vv_nxv8f64(<vscale x 8 x double> %a, <vsca
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2042,12 +1816,10 @@ define <vscale x 8 x double> @vfnmacc_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %allones, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
 
@@ -2060,11 +1832,9 @@ define <vscale x 8 x double> @vfnmacc_vf_nxv8f64(<vscale x 8 x double> %a, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2078,11 +1848,9 @@ define <vscale x 8 x double> @vfnmacc_vf_nxv8f64_commute(<vscale x 8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %nega, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %nega, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2096,12 +1864,10 @@ define <vscale x 8 x double> @vfnmacc_vf_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %allones, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
 
@@ -2113,11 +1879,9 @@ define <vscale x 8 x double> @vfnmacc_vv_nxv8f64_ta(<vscale x 8 x double> %a, <v
 ; CHECK-NEXT:    vfnmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2131,11 +1895,9 @@ define <vscale x 8 x double> @vfnmacc_vf_nxv8f64_ta(<vscale x 8 x double> %a, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2149,11 +1911,9 @@ define <vscale x 8 x double> @vfnmacc_vf_nxv8f64_commute_ta(<vscale x 8 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %nega, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %nega, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll
index d906b6450a93da..14dba24daf5ffa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll
@@ -16,10 +16,8 @@ define <vscale x 1 x half> @vfnmsac_vv_nxv1f16(<vscale x 1 x half> %a, <vscale x
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -31,11 +29,9 @@ define <vscale x 1 x half> @vfnmsac_vv_nxv1f16_unmasked(<vscale x 1 x half> %a,
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %allones, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
 
@@ -48,10 +44,8 @@ define <vscale x 1 x half> @vfnmsac_vf_nxv1f16(<vscale x 1 x half> %a, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -65,10 +59,8 @@ define <vscale x 1 x half> @vfnmsac_vf_nxv1f16_commute(<vscale x 1 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %nega, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %nega, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -82,11 +74,9 @@ define <vscale x 1 x half> @vfnmsac_vf_nxv1f16_unmasked(<vscale x 1 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> %allones, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x half> @llvm.vp.merge.nxv1f16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
 
@@ -97,10 +87,8 @@ define <vscale x 1 x half> @vfnmsac_vv_nxv1f16_ta(<vscale x 1 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %b, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -114,10 +102,8 @@ define <vscale x 1 x half> @vfnmsac_vf_nxv1f16_ta(<vscale x 1 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %nega, <vscale x 1 x half> %vb, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -131,10 +117,8 @@ define <vscale x 1 x half> @vfnmsac_vf_nxv1f16_commute_ta(<vscale x 1 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %nega, <vscale x 1 x half> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fma.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %nega, <vscale x 1 x half> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x half> @llvm.vp.select.nxv1f16(<vscale x 1 x i1> %m, <vscale x 1 x half> %v, <vscale x 1 x half> %c, i32 %evl)
   ret <vscale x 1 x half> %u
 }
@@ -151,10 +135,8 @@ define <vscale x 2 x half> @vfnmsac_vv_nxv2f16(<vscale x 2 x half> %a, <vscale x
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -166,11 +148,9 @@ define <vscale x 2 x half> @vfnmsac_vv_nxv2f16_unmasked(<vscale x 2 x half> %a,
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %allones, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
 
@@ -183,10 +163,8 @@ define <vscale x 2 x half> @vfnmsac_vf_nxv2f16(<vscale x 2 x half> %a, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -200,10 +178,8 @@ define <vscale x 2 x half> @vfnmsac_vf_nxv2f16_commute(<vscale x 2 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %nega, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %nega, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -217,11 +193,9 @@ define <vscale x 2 x half> @vfnmsac_vf_nxv2f16_unmasked(<vscale x 2 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> %allones, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x half> @llvm.vp.merge.nxv2f16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
 
@@ -232,10 +206,8 @@ define <vscale x 2 x half> @vfnmsac_vv_nxv2f16_ta(<vscale x 2 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %b, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -249,10 +221,8 @@ define <vscale x 2 x half> @vfnmsac_vf_nxv2f16_ta(<vscale x 2 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %nega, <vscale x 2 x half> %vb, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -266,10 +236,8 @@ define <vscale x 2 x half> @vfnmsac_vf_nxv2f16_commute_ta(<vscale x 2 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %nega, <vscale x 2 x half> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fma.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %nega, <vscale x 2 x half> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x half> @llvm.vp.select.nxv2f16(<vscale x 2 x i1> %m, <vscale x 2 x half> %v, <vscale x 2 x half> %c, i32 %evl)
   ret <vscale x 2 x half> %u
 }
@@ -286,10 +254,8 @@ define <vscale x 4 x half> @vfnmsac_vv_nxv4f16(<vscale x 4 x half> %a, <vscale x
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -301,11 +267,9 @@ define <vscale x 4 x half> @vfnmsac_vv_nxv4f16_unmasked(<vscale x 4 x half> %a,
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %allones, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
 
@@ -318,10 +282,8 @@ define <vscale x 4 x half> @vfnmsac_vf_nxv4f16(<vscale x 4 x half> %a, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -335,10 +297,8 @@ define <vscale x 4 x half> @vfnmsac_vf_nxv4f16_commute(<vscale x 4 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %nega, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %nega, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -352,11 +312,9 @@ define <vscale x 4 x half> @vfnmsac_vf_nxv4f16_unmasked(<vscale x 4 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> %allones, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x half> @llvm.vp.merge.nxv4f16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
 
@@ -367,10 +325,8 @@ define <vscale x 4 x half> @vfnmsac_vv_nxv4f16_ta(<vscale x 4 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %b, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -384,10 +340,8 @@ define <vscale x 4 x half> @vfnmsac_vf_nxv4f16_ta(<vscale x 4 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %nega, <vscale x 4 x half> %vb, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -401,10 +355,8 @@ define <vscale x 4 x half> @vfnmsac_vf_nxv4f16_commute_ta(<vscale x 4 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %nega, <vscale x 4 x half> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fma.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %nega, <vscale x 4 x half> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x half> @llvm.vp.select.nxv4f16(<vscale x 4 x i1> %m, <vscale x 4 x half> %v, <vscale x 4 x half> %c, i32 %evl)
   ret <vscale x 4 x half> %u
 }
@@ -421,10 +373,8 @@ define <vscale x 8 x half> @vfnmsac_vv_nxv8f16(<vscale x 8 x half> %a, <vscale x
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -436,11 +386,9 @@ define <vscale x 8 x half> @vfnmsac_vv_nxv8f16_unmasked(<vscale x 8 x half> %a,
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %allones, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
 
@@ -453,10 +401,8 @@ define <vscale x 8 x half> @vfnmsac_vf_nxv8f16(<vscale x 8 x half> %a, half %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -470,10 +416,8 @@ define <vscale x 8 x half> @vfnmsac_vf_nxv8f16_commute(<vscale x 8 x half> %a, h
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %nega, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %nega, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -487,11 +431,9 @@ define <vscale x 8 x half> @vfnmsac_vf_nxv8f16_unmasked(<vscale x 8 x half> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> %allones, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
 
@@ -502,10 +444,8 @@ define <vscale x 8 x half> @vfnmsac_vv_nxv8f16_ta(<vscale x 8 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %b, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -519,10 +459,8 @@ define <vscale x 8 x half> @vfnmsac_vf_nxv8f16_ta(<vscale x 8 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %nega, <vscale x 8 x half> %vb, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -536,10 +474,8 @@ define <vscale x 8 x half> @vfnmsac_vf_nxv8f16_commute_ta(<vscale x 8 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %nega, <vscale x 8 x half> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %nega, <vscale x 8 x half> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x half> @llvm.vp.select.nxv8f16(<vscale x 8 x i1> %m, <vscale x 8 x half> %v, <vscale x 8 x half> %c, i32 %evl)
   ret <vscale x 8 x half> %u
 }
@@ -556,10 +492,8 @@ define <vscale x 16 x half> @vfnmsac_vv_nxv16f16(<vscale x 16 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -571,11 +505,9 @@ define <vscale x 16 x half> @vfnmsac_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %allones, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
 
@@ -588,10 +520,8 @@ define <vscale x 16 x half> @vfnmsac_vf_nxv16f16(<vscale x 16 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -605,10 +535,8 @@ define <vscale x 16 x half> @vfnmsac_vf_nxv16f16_commute(<vscale x 16 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %nega, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %nega, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -622,11 +550,9 @@ define <vscale x 16 x half> @vfnmsac_vf_nxv16f16_unmasked(<vscale x 16 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> %allones, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x half> @llvm.vp.merge.nxv16f16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
 
@@ -637,10 +563,8 @@ define <vscale x 16 x half> @vfnmsac_vv_nxv16f16_ta(<vscale x 16 x half> %a, <vs
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %b, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -654,10 +578,8 @@ define <vscale x 16 x half> @vfnmsac_vf_nxv16f16_ta(<vscale x 16 x half> %a, hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %nega, <vscale x 16 x half> %vb, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -671,10 +593,8 @@ define <vscale x 16 x half> @vfnmsac_vf_nxv16f16_commute_ta(<vscale x 16 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %nega, <vscale x 16 x half> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fma.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %nega, <vscale x 16 x half> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x half> @llvm.vp.select.nxv16f16(<vscale x 16 x i1> %m, <vscale x 16 x half> %v, <vscale x 16 x half> %c, i32 %evl)
   ret <vscale x 16 x half> %u
 }
@@ -692,10 +612,8 @@ define <vscale x 32 x half> @vfnmsac_vv_nxv32f16(<vscale x 32 x half> %a, <vscal
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -708,11 +626,9 @@ define <vscale x 32 x half> @vfnmsac_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %allones, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
 
@@ -725,10 +641,8 @@ define <vscale x 32 x half> @vfnmsac_vf_nxv32f16(<vscale x 32 x half> %a, half %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -742,10 +656,8 @@ define <vscale x 32 x half> @vfnmsac_vf_nxv32f16_commute(<vscale x 32 x half> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %nega, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %nega, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -759,11 +671,9 @@ define <vscale x 32 x half> @vfnmsac_vf_nxv32f16_unmasked(<vscale x 32 x half> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> %allones, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x half> @llvm.vp.merge.nxv32f16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
 
@@ -775,10 +685,8 @@ define <vscale x 32 x half> @vfnmsac_vv_nxv32f16_ta(<vscale x 32 x half> %a, <vs
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %b, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -792,10 +700,8 @@ define <vscale x 32 x half> @vfnmsac_vf_nxv32f16_ta(<vscale x 32 x half> %a, hal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %nega, <vscale x 32 x half> %vb, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -809,10 +715,8 @@ define <vscale x 32 x half> @vfnmsac_vf_nxv32f16_commute_ta(<vscale x 32 x half>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %allones, i32 %evl)
-  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %nega, <vscale x 32 x half> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fma.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %nega, <vscale x 32 x half> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x half> @llvm.vp.select.nxv32f16(<vscale x 32 x i1> %m, <vscale x 32 x half> %v, <vscale x 32 x half> %c, i32 %evl)
   ret <vscale x 32 x half> %u
 }
@@ -829,10 +733,8 @@ define <vscale x 1 x float> @vfnmsac_vv_nxv1f32(<vscale x 1 x float> %a, <vscale
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -844,11 +746,9 @@ define <vscale x 1 x float> @vfnmsac_vv_nxv1f32_unmasked(<vscale x 1 x float> %a
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -861,10 +761,8 @@ define <vscale x 1 x float> @vfnmsac_vf_nxv1f32(<vscale x 1 x float> %a, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -878,10 +776,8 @@ define <vscale x 1 x float> @vfnmsac_vf_nxv1f32_commute(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %nega, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %nega, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -895,11 +791,9 @@ define <vscale x 1 x float> @vfnmsac_vf_nxv1f32_unmasked(<vscale x 1 x float> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -910,10 +804,8 @@ define <vscale x 1 x float> @vfnmsac_vv_nxv1f32_ta(<vscale x 1 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -927,10 +819,8 @@ define <vscale x 1 x float> @vfnmsac_vf_nxv1f32_ta(<vscale x 1 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vb, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -944,10 +834,8 @@ define <vscale x 1 x float> @vfnmsac_vf_nxv1f32_commute_ta(<vscale x 1 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %nega, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %nega, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.select.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -964,10 +852,8 @@ define <vscale x 2 x float> @vfnmsac_vv_nxv2f32(<vscale x 2 x float> %a, <vscale
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -979,11 +865,9 @@ define <vscale x 2 x float> @vfnmsac_vv_nxv2f32_unmasked(<vscale x 2 x float> %a
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %allones, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
 
@@ -996,10 +880,8 @@ define <vscale x 2 x float> @vfnmsac_vf_nxv2f32(<vscale x 2 x float> %a, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1013,10 +895,8 @@ define <vscale x 2 x float> @vfnmsac_vf_nxv2f32_commute(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %nega, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %nega, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1030,11 +910,9 @@ define <vscale x 2 x float> @vfnmsac_vf_nxv2f32_unmasked(<vscale x 2 x float> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> %allones, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x float> @llvm.vp.merge.nxv2f32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
 
@@ -1045,10 +923,8 @@ define <vscale x 2 x float> @vfnmsac_vv_nxv2f32_ta(<vscale x 2 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1062,10 +938,8 @@ define <vscale x 2 x float> @vfnmsac_vf_nxv2f32_ta(<vscale x 2 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vb, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1079,10 +953,8 @@ define <vscale x 2 x float> @vfnmsac_vf_nxv2f32_commute_ta(<vscale x 2 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %nega, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %nega, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x float> @llvm.vp.select.nxv2f32(<vscale x 2 x i1> %m, <vscale x 2 x float> %v, <vscale x 2 x float> %c, i32 %evl)
   ret <vscale x 2 x float> %u
 }
@@ -1099,10 +971,8 @@ define <vscale x 4 x float> @vfnmsac_vv_nxv4f32(<vscale x 4 x float> %a, <vscale
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1114,11 +984,9 @@ define <vscale x 4 x float> @vfnmsac_vv_nxv4f32_unmasked(<vscale x 4 x float> %a
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %allones, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
 
@@ -1131,10 +999,8 @@ define <vscale x 4 x float> @vfnmsac_vf_nxv4f32(<vscale x 4 x float> %a, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1148,10 +1014,8 @@ define <vscale x 4 x float> @vfnmsac_vf_nxv4f32_commute(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %nega, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %nega, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1165,11 +1029,9 @@ define <vscale x 4 x float> @vfnmsac_vf_nxv4f32_unmasked(<vscale x 4 x float> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> %allones, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
 
@@ -1180,10 +1042,8 @@ define <vscale x 4 x float> @vfnmsac_vv_nxv4f32_ta(<vscale x 4 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1197,10 +1057,8 @@ define <vscale x 4 x float> @vfnmsac_vf_nxv4f32_ta(<vscale x 4 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vb, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1214,10 +1072,8 @@ define <vscale x 4 x float> @vfnmsac_vf_nxv4f32_commute_ta(<vscale x 4 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %nega, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %nega, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x float> @llvm.vp.select.nxv4f32(<vscale x 4 x i1> %m, <vscale x 4 x float> %v, <vscale x 4 x float> %c, i32 %evl)
   ret <vscale x 4 x float> %u
 }
@@ -1234,10 +1090,8 @@ define <vscale x 8 x float> @vfnmsac_vv_nxv8f32(<vscale x 8 x float> %a, <vscale
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1249,11 +1103,9 @@ define <vscale x 8 x float> @vfnmsac_vv_nxv8f32_unmasked(<vscale x 8 x float> %a
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %allones, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
 
@@ -1266,10 +1118,8 @@ define <vscale x 8 x float> @vfnmsac_vf_nxv8f32(<vscale x 8 x float> %a, float %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1283,10 +1133,8 @@ define <vscale x 8 x float> @vfnmsac_vf_nxv8f32_commute(<vscale x 8 x float> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %nega, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %nega, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1300,11 +1148,9 @@ define <vscale x 8 x float> @vfnmsac_vf_nxv8f32_unmasked(<vscale x 8 x float> %a
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> %allones, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x float> @llvm.vp.merge.nxv8f32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
 
@@ -1315,10 +1161,8 @@ define <vscale x 8 x float> @vfnmsac_vv_nxv8f32_ta(<vscale x 8 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1332,10 +1176,8 @@ define <vscale x 8 x float> @vfnmsac_vf_nxv8f32_ta(<vscale x 8 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vb, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1349,10 +1191,8 @@ define <vscale x 8 x float> @vfnmsac_vf_nxv8f32_commute_ta(<vscale x 8 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %nega, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %nega, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x float> @llvm.vp.select.nxv8f32(<vscale x 8 x i1> %m, <vscale x 8 x float> %v, <vscale x 8 x float> %c, i32 %evl)
   ret <vscale x 8 x float> %u
 }
@@ -1370,10 +1210,8 @@ define <vscale x 16 x float> @vfnmsac_vv_nxv16f32(<vscale x 16 x float> %a, <vsc
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1386,11 +1224,9 @@ define <vscale x 16 x float> @vfnmsac_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %allones, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
 
@@ -1403,10 +1239,8 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32(<vscale x 16 x float> %a, floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1420,10 +1254,8 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_commute(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %nega, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %nega, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1437,11 +1269,9 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> %allones, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x float> @llvm.vp.merge.nxv16f32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
 
@@ -1453,10 +1283,8 @@ define <vscale x 16 x float> @vfnmsac_vv_nxv16f32_ta(<vscale x 16 x float> %a, <
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1470,10 +1298,8 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_ta(<vscale x 16 x float> %a, f
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vb, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1487,10 +1313,8 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_commute_ta(<vscale x 16 x floa
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %nega, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %nega, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x float> @llvm.vp.select.nxv16f32(<vscale x 16 x i1> %m, <vscale x 16 x float> %v, <vscale x 16 x float> %c, i32 %evl)
   ret <vscale x 16 x float> %u
 }
@@ -1507,10 +1331,8 @@ define <vscale x 1 x double> @vfnmsac_vv_nxv1f64(<vscale x 1 x double> %a, <vsca
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1522,11 +1344,9 @@ define <vscale x 1 x double> @vfnmsac_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %allones, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
 
@@ -1539,10 +1359,8 @@ define <vscale x 1 x double> @vfnmsac_vf_nxv1f64(<vscale x 1 x double> %a, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1556,10 +1374,8 @@ define <vscale x 1 x double> @vfnmsac_vf_nxv1f64_commute(<vscale x 1 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %nega, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %nega, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1573,11 +1389,9 @@ define <vscale x 1 x double> @vfnmsac_vf_nxv1f64_unmasked(<vscale x 1 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> %allones, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x double> @llvm.vp.merge.nxv1f64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
 
@@ -1588,10 +1402,8 @@ define <vscale x 1 x double> @vfnmsac_vv_nxv1f64_ta(<vscale x 1 x double> %a, <v
 ; CHECK-NEXT:    vfnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1605,10 +1417,8 @@ define <vscale x 1 x double> @vfnmsac_vf_nxv1f64_ta(<vscale x 1 x double> %a, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vb, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1622,10 +1432,8 @@ define <vscale x 1 x double> @vfnmsac_vf_nxv1f64_commute_ta(<vscale x 1 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %nega, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %nega, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x double> @llvm.vp.select.nxv1f64(<vscale x 1 x i1> %m, <vscale x 1 x double> %v, <vscale x 1 x double> %c, i32 %evl)
   ret <vscale x 1 x double> %u
 }
@@ -1642,10 +1450,8 @@ define <vscale x 2 x double> @vfnmsac_vv_nxv2f64(<vscale x 2 x double> %a, <vsca
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1657,11 +1463,9 @@ define <vscale x 2 x double> @vfnmsac_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %allones, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
 
@@ -1674,10 +1478,8 @@ define <vscale x 2 x double> @vfnmsac_vf_nxv2f64(<vscale x 2 x double> %a, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1691,10 +1493,8 @@ define <vscale x 2 x double> @vfnmsac_vf_nxv2f64_commute(<vscale x 2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %nega, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %nega, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1708,11 +1508,9 @@ define <vscale x 2 x double> @vfnmsac_vf_nxv2f64_unmasked(<vscale x 2 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> %allones, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x double> @llvm.vp.merge.nxv2f64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
 
@@ -1723,10 +1521,8 @@ define <vscale x 2 x double> @vfnmsac_vv_nxv2f64_ta(<vscale x 2 x double> %a, <v
 ; CHECK-NEXT:    vfnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1740,10 +1536,8 @@ define <vscale x 2 x double> @vfnmsac_vf_nxv2f64_ta(<vscale x 2 x double> %a, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vb, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1757,10 +1551,8 @@ define <vscale x 2 x double> @vfnmsac_vf_nxv2f64_commute_ta(<vscale x 2 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %nega, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %nega, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x double> @llvm.vp.select.nxv2f64(<vscale x 2 x i1> %m, <vscale x 2 x double> %v, <vscale x 2 x double> %c, i32 %evl)
   ret <vscale x 2 x double> %u
 }
@@ -1777,10 +1569,8 @@ define <vscale x 4 x double> @vfnmsac_vv_nxv4f64(<vscale x 4 x double> %a, <vsca
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1792,11 +1582,9 @@ define <vscale x 4 x double> @vfnmsac_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %allones, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
 
@@ -1809,10 +1597,8 @@ define <vscale x 4 x double> @vfnmsac_vf_nxv4f64(<vscale x 4 x double> %a, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1826,10 +1612,8 @@ define <vscale x 4 x double> @vfnmsac_vf_nxv4f64_commute(<vscale x 4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %nega, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %nega, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1843,11 +1627,9 @@ define <vscale x 4 x double> @vfnmsac_vf_nxv4f64_unmasked(<vscale x 4 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> %allones, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x double> @llvm.vp.merge.nxv4f64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
 
@@ -1858,10 +1640,8 @@ define <vscale x 4 x double> @vfnmsac_vv_nxv4f64_ta(<vscale x 4 x double> %a, <v
 ; CHECK-NEXT:    vfnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1875,10 +1655,8 @@ define <vscale x 4 x double> @vfnmsac_vf_nxv4f64_ta(<vscale x 4 x double> %a, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vb, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1892,10 +1670,8 @@ define <vscale x 4 x double> @vfnmsac_vf_nxv4f64_commute_ta(<vscale x 4 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %nega, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %nega, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x double> @llvm.vp.select.nxv4f64(<vscale x 4 x i1> %m, <vscale x 4 x double> %v, <vscale x 4 x double> %c, i32 %evl)
   ret <vscale x 4 x double> %u
 }
@@ -1913,10 +1689,8 @@ define <vscale x 8 x double> @vfnmsac_vv_nxv8f64(<vscale x 8 x double> %a, <vsca
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1929,11 +1703,9 @@ define <vscale x 8 x double> @vfnmsac_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %allones, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
 
@@ -1946,10 +1718,8 @@ define <vscale x 8 x double> @vfnmsac_vf_nxv8f64(<vscale x 8 x double> %a, doubl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1963,10 +1733,8 @@ define <vscale x 8 x double> @vfnmsac_vf_nxv8f64_commute(<vscale x 8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %nega, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %nega, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -1980,11 +1748,9 @@ define <vscale x 8 x double> @vfnmsac_vf_nxv8f64_unmasked(<vscale x 8 x double>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> %allones, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x double> @llvm.vp.merge.nxv8f64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
 
@@ -1996,10 +1762,8 @@ define <vscale x 8 x double> @vfnmsac_vv_nxv8f64_ta(<vscale x 8 x double> %a, <v
 ; CHECK-NEXT:    vfnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2013,10 +1777,8 @@ define <vscale x 8 x double> @vfnmsac_vf_nxv8f64_ta(<vscale x 8 x double> %a, do
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vb, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
@@ -2030,10 +1792,8 @@ define <vscale x 8 x double> @vfnmsac_vf_nxv8f64_commute_ta(<vscale x 8 x double
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %nega, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %nega, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x double> @llvm.vp.select.nxv8f64(<vscale x 8 x i1> %m, <vscale x 8 x double> %v, <vscale x 8 x double> %c, i32 %evl)
   ret <vscale x 8 x double> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll
index d7bb1ad7726db1..876f8d94563869 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll
@@ -26,9 +26,7 @@ define <vscale x 1 x half> @vfrdiv_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -54,9 +52,7 @@ define <vscale x 2 x half> @vfrdiv_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -82,9 +78,7 @@ define <vscale x 4 x half> @vfrdiv_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -110,9 +104,7 @@ define <vscale x 8 x half> @vfrdiv_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -138,9 +130,7 @@ define <vscale x 16 x half> @vfrdiv_vf_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -166,9 +156,7 @@ define <vscale x 32 x half> @vfrdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -194,9 +182,7 @@ define <vscale x 1 x float> @vfrdiv_vf_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -222,9 +208,7 @@ define <vscale x 2 x float> @vfrdiv_vf_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -250,9 +234,7 @@ define <vscale x 4 x float> @vfrdiv_vf_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -278,9 +260,7 @@ define <vscale x 8 x float> @vfrdiv_vf_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -306,9 +286,7 @@ define <vscale x 16 x float> @vfrdiv_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -334,9 +312,7 @@ define <vscale x 1 x double> @vfrdiv_vf_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -362,9 +338,7 @@ define <vscale x 2 x double> @vfrdiv_vf_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -390,9 +364,7 @@ define <vscale x 4 x double> @vfrdiv_vf_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -418,8 +390,6 @@ define <vscale x 8 x double> @vfrdiv_vf_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll
index 9e1c719e3bfa60..bd941dc1a77724 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll
@@ -26,9 +26,7 @@ define <vscale x 1 x half> @vfrsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -54,9 +52,7 @@ define <vscale x 2 x half> @vfrsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -82,9 +78,7 @@ define <vscale x 4 x half> @vfrsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -110,9 +104,7 @@ define <vscale x 8 x half> @vfrsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -138,9 +130,7 @@ define <vscale x 16 x half> @vfrsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -166,9 +156,7 @@ define <vscale x 32 x half> @vfrsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -194,9 +182,7 @@ define <vscale x 1 x float> @vfrsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -222,9 +208,7 @@ define <vscale x 2 x float> @vfrsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -250,9 +234,7 @@ define <vscale x 4 x float> @vfrsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -278,9 +260,7 @@ define <vscale x 8 x float> @vfrsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -306,9 +286,7 @@ define <vscale x 16 x float> @vfrsub_vf_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -334,9 +312,7 @@ define <vscale x 1 x double> @vfrsub_vf_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -362,9 +338,7 @@ define <vscale x 2 x double> @vfrsub_vf_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -390,9 +364,7 @@ define <vscale x 4 x double> @vfrsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -418,8 +390,6 @@ define <vscale x 8 x double> @vfrsub_vf_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index b13d221e00e634..d6caad15e40a2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -46,9 +46,7 @@ define <vscale x 1 x half> @vfsqrt_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.sqrt.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.sqrt.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -90,9 +88,7 @@ define <vscale x 2 x half> @vfsqrt_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.sqrt.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.sqrt.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -134,9 +130,7 @@ define <vscale x 4 x half> @vfsqrt_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.sqrt.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.sqrt.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -178,9 +172,7 @@ define <vscale x 8 x half> @vfsqrt_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.sqrt.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.sqrt.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -222,9 +214,7 @@ define <vscale x 16 x half> @vfsqrt_vv_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.sqrt.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.sqrt.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -279,8 +269,6 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -288,6 +276,8 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -306,9 +296,7 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.sqrt.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.sqrt.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -330,9 +318,7 @@ define <vscale x 1 x float> @vfsqrt_vv_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.sqrt.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.sqrt.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -354,9 +340,7 @@ define <vscale x 2 x float> @vfsqrt_vv_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.sqrt.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.sqrt.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -378,9 +362,7 @@ define <vscale x 4 x float> @vfsqrt_vv_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.sqrt.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.sqrt.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -402,9 +384,7 @@ define <vscale x 8 x float> @vfsqrt_vv_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.sqrt.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.sqrt.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -426,9 +406,7 @@ define <vscale x 16 x float> @vfsqrt_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.sqrt.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.sqrt.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -450,9 +428,7 @@ define <vscale x 1 x double> @vfsqrt_vv_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.sqrt.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.sqrt.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -474,9 +450,7 @@ define <vscale x 2 x double> @vfsqrt_vv_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.sqrt.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.sqrt.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -498,9 +472,7 @@ define <vscale x 4 x double> @vfsqrt_vv_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.sqrt.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.sqrt.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -522,9 +494,7 @@ define <vscale x 7 x double> @vfsqrt_vv_nxv7f64_unmasked(<vscale x 7 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x double> @llvm.vp.sqrt.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x double> @llvm.vp.sqrt.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -546,9 +516,7 @@ define <vscale x 8 x double> @vfsqrt_vv_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.sqrt.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.sqrt.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -598,8 +566,6 @@ define <vscale x 16 x double> @vfsqrt_vv_nxv16f64_unmasked(<vscale x 16 x double
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll
index b8b95ad21de607..b7941c17dab5ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll
@@ -562,9 +562,7 @@ define <vscale x 8 x float> @vfsub_vv_mask_nxv8f32(<vscale x 8 x float> %va, <vs
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float 0.0, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> %splat
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %vb, <vscale x 8 x float> splat (float 0.0)
   %vc = fsub fast <vscale x 8 x float> %va, %vs
   ret <vscale x 8 x float> %vc
 }
@@ -575,11 +573,9 @@ define <vscale x 8 x float> @vfsub_vf_mask_nxv8f32(<vscale x 8 x float> %va, flo
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-  %head0 = insertelement <vscale x 8 x float> poison, float 0.0, i32 0
-  %splat0 = shufflevector <vscale x 8 x float> %head0, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
   %head1 = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat1 = shufflevector <vscale x 8 x float> %head1, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> %splat0
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x float> %splat1, <vscale x 8 x float> splat (float 0.0)
   %vc = fsub fast <vscale x 8 x float> %va, %vs
   ret <vscale x 8 x float> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index 010b133e51b1f2..2eae18d7cc4937 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -48,9 +48,7 @@ define <vscale x 1 x half> @vfsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -104,9 +102,7 @@ define <vscale x 1 x half> @vfsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -150,9 +146,7 @@ define <vscale x 2 x half> @vfsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -206,9 +200,7 @@ define <vscale x 2 x half> @vfsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -252,9 +244,7 @@ define <vscale x 4 x half> @vfsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -308,9 +298,7 @@ define <vscale x 4 x half> @vfsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -354,9 +342,7 @@ define <vscale x 8 x half> @vfsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -410,9 +396,7 @@ define <vscale x 8 x half> @vfsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -456,9 +440,7 @@ define <vscale x 16 x half> @vfsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -512,9 +494,7 @@ define <vscale x 16 x half> @vfsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -591,8 +571,6 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -600,6 +578,8 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -628,9 +608,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -696,8 +674,6 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -705,6 +681,8 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
+; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -726,9 +704,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -750,9 +726,7 @@ define <vscale x 1 x float> @vfsub_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -776,9 +750,7 @@ define <vscale x 1 x float> @vfsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -800,9 +772,7 @@ define <vscale x 2 x float> @vfsub_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -826,9 +796,7 @@ define <vscale x 2 x float> @vfsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -850,9 +818,7 @@ define <vscale x 4 x float> @vfsub_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -876,9 +842,7 @@ define <vscale x 4 x float> @vfsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -900,9 +864,7 @@ define <vscale x 8 x float> @vfsub_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -926,9 +888,7 @@ define <vscale x 8 x float> @vfsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -950,9 +910,7 @@ define <vscale x 16 x float> @vfsub_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -976,9 +934,7 @@ define <vscale x 16 x float> @vfsub_vf_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -1000,9 +956,7 @@ define <vscale x 1 x double> @vfsub_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1026,9 +980,7 @@ define <vscale x 1 x double> @vfsub_vf_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1050,9 +1002,7 @@ define <vscale x 2 x double> @vfsub_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1076,9 +1026,7 @@ define <vscale x 2 x double> @vfsub_vf_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1100,9 +1048,7 @@ define <vscale x 4 x double> @vfsub_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1126,9 +1072,7 @@ define <vscale x 4 x double> @vfsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1162,9 +1106,7 @@ define <vscale x 8 x double> @vfsub_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1188,8 +1130,6 @@ define <vscale x 8 x double> @vfsub_vf_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
index 8a484c7f6b7769..fab76ac5645814 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
@@ -51,11 +51,9 @@ define <vscale x 1 x float> @vfmacc_vv_nxv1f32_unmasked(<vscale x 1 x half> %a,
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfmadd.vv v8, v11, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -76,11 +74,9 @@ define <vscale x 1 x float> @vfmacc_vv_nxv1f32_tu(<vscale x 1 x half> %a, <vscal
 ; ZVFHMIN-NEXT:    vfmacc.vv v10, v11, v8, v0.t
 ; ZVFHMIN-NEXT:    vmv1r.v v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -132,12 +128,10 @@ define <vscale x 1 x float> @vfmacc_vv_nxv1f32_unmasked_tu(<vscale x 1 x half> %
 ; ZVFHMIN-NEXT:    vfmacc.vv v10, v11, v8
 ; ZVFHMIN-NEXT:    vmv1r.v v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -223,11 +217,9 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32_unmasked(<vscale x 1 x half> %va,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %vaext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vaext, <vscale x 1 x float> %vbext, <vscale x 1 x float> %vc, <vscale x 1 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vaext, <vscale x 1 x float> %vbext, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -255,11 +247,9 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32_tu(<vscale x 1 x half> %va, half
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %vaext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vaext, <vscale x 1 x float> %vbext, <vscale x 1 x float> %vc, <vscale x 1 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vaext, <vscale x 1 x float> %vbext, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %vc, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -288,11 +278,9 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32_commute_tu(<vscale x 1 x half> %v
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %vaext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vbext, <vscale x 1 x float> %vaext, <vscale x 1 x float> %vc, <vscale x 1 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vbext, <vscale x 1 x float> %vaext, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %vc, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -321,12 +309,10 @@ define <vscale x 1 x float> @vfmacc_vf_nxv1f32_unmasked_tu(<vscale x 1 x half> %
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %vaext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vaext, <vscale x 1 x float> %vbext, <vscale x 1 x float> %vc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %vc, i32 %evl)
+  %vaext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vaext, <vscale x 1 x float> %vbext, <vscale x 1 x float> %vc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %vc, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -374,11 +360,9 @@ define <vscale x 2 x float> @vfmacc_vv_nxv2f32_unmasked(<vscale x 2 x half> %a,
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfmadd.vv v8, v11, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x float> %bext, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x float> %bext, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -434,11 +418,9 @@ define <vscale x 2 x float> @vfmacc_vf_nxv2f32_unmasked(<vscale x 2 x half> %va,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %vaext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vaext, <vscale x 2 x float> %vbext, <vscale x 2 x float> %vc, <vscale x 2 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vaext, <vscale x 2 x float> %vbext, <vscale x 2 x float> %vc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -488,11 +470,9 @@ define <vscale x 4 x float> @vfmacc_vv_nxv4f32_unmasked(<vscale x 4 x half> %a,
 ; ZVFHMIN-NEXT:    vfmadd.vv v12, v14, v10
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x float> %bext, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x float> %bext, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -548,11 +528,9 @@ define <vscale x 4 x float> @vfmacc_vf_nxv4f32_unmasked(<vscale x 4 x half> %va,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %vaext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vaext, <vscale x 4 x float> %vbext, <vscale x 4 x float> %vc, <vscale x 4 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vaext, <vscale x 4 x float> %vbext, <vscale x 4 x float> %vc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -602,11 +580,9 @@ define <vscale x 8 x float> @vfmacc_vv_nxv8f32_unmasked(<vscale x 8 x half> %a,
 ; ZVFHMIN-NEXT:    vfmadd.vv v16, v20, v12
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x float> %bext, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x float> %bext, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -662,11 +638,9 @@ define <vscale x 8 x float> @vfmacc_vf_nxv8f32_unmasked(<vscale x 8 x half> %va,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %vaext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vaext, <vscale x 8 x float> %vbext, <vscale x 8 x float> %vc, <vscale x 8 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vaext, <vscale x 8 x float> %vbext, <vscale x 8 x float> %vc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -730,11 +704,9 @@ define <vscale x 16 x float> @vfmacc_vv_nxv16f32_unmasked(<vscale x 16 x half> %
 ; ZVFHMIN-NEXT:    vfmadd.vv v24, v0, v16
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v24
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x float> %bext, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x float> %bext, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -790,11 +762,9 @@ define <vscale x 16 x float> @vfmacc_vf_nxv16f32_unmasked(<vscale x 16 x half> %
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %vaext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vaext, <vscale x 16 x float> %vbext, <vscale x 16 x float> %vc, <vscale x 16 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vaext, <vscale x 16 x float> %vbext, <vscale x 16 x float> %vc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -824,11 +794,9 @@ define <vscale x 1 x double> @vfmacc_vv_nxv1f64_unmasked(<vscale x 1 x float> %a
 ; CHECK-NEXT:    vfwmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x double> %bext, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x double> %bext, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -856,11 +824,9 @@ define <vscale x 1 x double> @vfmacc_vf_nxv1f64_unmasked(<vscale x 1 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %vaext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vaext, <vscale x 1 x double> %vbext, <vscale x 1 x double> %vc, <vscale x 1 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vaext, <vscale x 1 x double> %vbext, <vscale x 1 x double> %vc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -890,11 +856,9 @@ define <vscale x 2 x double> @vfmacc_vv_nxv2f64_unmasked(<vscale x 2 x float> %a
 ; CHECK-NEXT:    vfwmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x double> %bext, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x double> %bext, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -922,11 +886,9 @@ define <vscale x 2 x double> @vfmacc_vf_nxv2f64_unmasked(<vscale x 2 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %vaext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vaext, <vscale x 2 x double> %vbext, <vscale x 2 x double> %vc, <vscale x 2 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vaext, <vscale x 2 x double> %vbext, <vscale x 2 x double> %vc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -956,11 +918,9 @@ define <vscale x 4 x double> @vfmacc_vv_nxv4f64_unmasked(<vscale x 4 x float> %a
 ; CHECK-NEXT:    vfwmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x double> %bext, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x double> %bext, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -988,11 +948,9 @@ define <vscale x 4 x double> @vfmacc_vf_nxv4f64_unmasked(<vscale x 4 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %vaext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vaext, <vscale x 4 x double> %vbext, <vscale x 4 x double> %vc, <vscale x 4 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vaext, <vscale x 4 x double> %vbext, <vscale x 4 x double> %vc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1022,11 +980,9 @@ define <vscale x 8 x double> @vfmacc_vv_nxv8f64_unmasked(<vscale x 8 x float> %a
 ; CHECK-NEXT:    vfwmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x double> %bext, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x double> %bext, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1054,11 +1010,9 @@ define <vscale x 8 x double> @vfmacc_vf_nxv8f64_unmasked(<vscale x 8 x float> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %vaext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vaext, <vscale x 8 x double> %vbext, <vscale x 8 x double> %vc, <vscale x 8 x i1> %allones, i32 %evl)
+  %vaext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vaext, <vscale x 8 x double> %vbext, <vscale x 8 x double> %vc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1090,11 +1044,9 @@ define <vscale x 1 x double> @vfmacc_vv_nxv1f64_nxv1f16_unmasked(<vscale x 1 x h
 ; CHECK-NEXT:    vfwmacc.vv v10, v11, v8
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x double> %bext, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x double> %bext, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1126,11 +1078,9 @@ define <vscale x 2 x double> @vfmacc_vv_nxv2f64_nxv2f16_unmasked(<vscale x 2 x h
 ; CHECK-NEXT:    vfwmacc.vv v10, v12, v8
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x double> %bext, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x double> %bext, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1162,11 +1112,9 @@ define <vscale x 4 x double> @vfmacc_vv_nxv4f64_nxv4f16_unmasked(<vscale x 4 x h
 ; CHECK-NEXT:    vfwmacc.vv v12, v10, v16
 ; CHECK-NEXT:    vmv4r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x double> %bext, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x double> %bext, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1198,11 +1146,9 @@ define <vscale x 8 x double> @vfmacc_vv_nxv8f64_nxv8f16_unmasked(<vscale x 8 x h
 ; CHECK-NEXT:    vfwmacc.vv v16, v12, v24
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x double> %bext, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x double> %bext, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1243,9 +1189,7 @@ define <vscale x 1 x float> @vfmacc_squared_nxv1f32_unmasked(<vscale x 1 x half>
 ; ZVFHMIN-NEXT:    vfmadd.vv v9, v9, v10
 ; ZVFHMIN-NEXT:    vmv1r.v v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %aext, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %aext, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll
index 92ad961999ddcb..4cd9b8bc2cdede 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-vp.ll
@@ -52,12 +52,10 @@ define <vscale x 1 x float> @vmfsac_vv_nxv1f32_unmasked(<vscale x 1 x half> %a,
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfmsub.vv v8, v11, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -78,12 +76,10 @@ define <vscale x 1 x float> @vmfsac_vv_nxv1f32_tu(<vscale x 1 x half> %a, <vscal
 ; ZVFHMIN-NEXT:    vfmsac.vv v10, v11, v8, v0.t
 ; ZVFHMIN-NEXT:    vmv1r.v v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %m, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
@@ -105,13 +101,11 @@ define <vscale x 1 x float> @vmfsac_vv_nxv1f32_unmasked_tu(<vscale x 1 x half> %
 ; ZVFHMIN-NEXT:    vfmsac.vv v10, v11, v8
 ; ZVFHMIN-NEXT:    vmv1r.v v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> %allones, <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %bext, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x float> @llvm.vp.merge.nxv1f32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x float> %v, <vscale x 1 x float> %c, i32 %evl)
   ret <vscale x 1 x float> %u
 }
 
@@ -138,8 +132,6 @@ define <vscale x 1 x float> @vmfsac_vf_nxv1f32(<vscale x 1 x half> %a, half %b,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
   %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %m, i32 %evl)
   %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
@@ -201,12 +193,10 @@ define <vscale x 1 x float> @vmfsac_vf_nxv1f32_unmasked(<vscale x 1 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %vbext, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x float> %vbext, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -253,12 +243,10 @@ define <vscale x 2 x float> @vmfsac_vv_nxv2f32_unmasked(<vscale x 2 x half> %a,
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfmsub.vv v8, v11, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x float> %bext, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x float> %bext, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -285,8 +273,6 @@ define <vscale x 2 x float> @vmfsac_vf_nxv2f32(<vscale x 2 x half> %a, half %b,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %m, i32 %evl)
   %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
@@ -348,12 +334,10 @@ define <vscale x 2 x float> @vmfsac_vf_nxv2f32_unmasked(<vscale x 2 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x float> %vbext, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x float> %vbext, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -402,12 +386,10 @@ define <vscale x 4 x float> @vmfsac_vv_nxv4f32_unmasked(<vscale x 4 x half> %a,
 ; ZVFHMIN-NEXT:    vfmsub.vv v12, v14, v10
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x float> %bext, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x float> %bext, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -434,8 +416,6 @@ define <vscale x 4 x float> @vmfsac_vf_nxv4f32(<vscale x 4 x half> %a, half %b,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
   %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %m, i32 %evl)
   %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
@@ -497,12 +477,10 @@ define <vscale x 4 x float> @vmfsac_vf_nxv4f32_unmasked(<vscale x 4 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x float> %vbext, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x float> %vbext, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -551,12 +529,10 @@ define <vscale x 8 x float> @vmfsac_vv_nxv8f32_unmasked(<vscale x 8 x half> %a,
 ; ZVFHMIN-NEXT:    vfmsub.vv v16, v20, v12
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x float> %bext, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x float> %bext, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -583,8 +559,6 @@ define <vscale x 8 x float> @vmfsac_vf_nxv8f32(<vscale x 8 x half> %a, half %b,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
   %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %m, i32 %evl)
   %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
@@ -646,11 +620,9 @@ define <vscale x 8 x float> @vmfsac_vf_nxv8f32_unmasked(<vscale x 8 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x float> %vbext, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x float> %vbext, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x float> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
index 3a03f0d65273ed..ca0bbfd65ca29c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
@@ -52,13 +52,11 @@ define <vscale x 1 x float> @vfnmacc_vv_nxv1f32_unmasked(<vscale x 1 x half> %a,
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfnmadd.vv v8, v11, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %bext, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %bext, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -149,13 +147,11 @@ define <vscale x 1 x float> @vfnmacc_vf_nxv1f32_unmasked(<vscale x 1 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vbext, <vscale x 1 x float> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vbext, <vscale x 1 x float> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -203,13 +199,11 @@ define <vscale x 2 x float> @vfnmacc_vv_nxv2f32_unmasked(<vscale x 2 x half> %a,
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfnmadd.vv v8, v11, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %bext, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %bext, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -300,13 +294,11 @@ define <vscale x 2 x float> @vfnmacc_vf_nxv2f32_unmasked(<vscale x 2 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vbext, <vscale x 2 x float> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vbext, <vscale x 2 x float> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -356,13 +348,11 @@ define <vscale x 4 x float> @vfnmacc_vv_nxv4f32_unmasked(<vscale x 4 x half> %a,
 ; ZVFHMIN-NEXT:    vfnmadd.vv v12, v14, v10
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %bext, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %bext, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -453,13 +443,11 @@ define <vscale x 4 x float> @vfnmacc_vf_nxv4f32_unmasked(<vscale x 4 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vbext, <vscale x 4 x float> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vbext, <vscale x 4 x float> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -509,13 +497,11 @@ define <vscale x 8 x float> @vfnmacc_vv_nxv8f32_unmasked(<vscale x 8 x half> %a,
 ; ZVFHMIN-NEXT:    vfnmadd.vv v16, v20, v12
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %bext, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %bext, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -606,13 +592,11 @@ define <vscale x 8 x float> @vfnmacc_vf_nxv8f32_unmasked(<vscale x 8 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vbext, <vscale x 8 x float> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vbext, <vscale x 8 x float> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -676,13 +660,11 @@ define <vscale x 16 x float> @vfnmacc_vv_nxv16f32_unmasked(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    vfnmadd.vv v24, v0, v16
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v24
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %bext, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %bext, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -773,13 +755,11 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32_unmasked(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vbext, <vscale x 16 x float> %negc, <vscale x 16 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vbext, <vscale x 16 x float> %negc, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -809,13 +789,11 @@ define <vscale x 1 x double> @vfnmacc_vv_nxv1f64_unmasked(<vscale x 1 x float> %
 ; CHECK-NEXT:    vfwnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %bext, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %bext, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -863,13 +841,11 @@ define <vscale x 1 x double> @vfnmacc_vf_nxv1f64_unmasked(<vscale x 1 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vbext, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vbext, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -899,13 +875,11 @@ define <vscale x 2 x double> @vfnmacc_vv_nxv2f64_unmasked(<vscale x 2 x float> %
 ; CHECK-NEXT:    vfwnmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %bext, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %bext, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -953,13 +927,11 @@ define <vscale x 2 x double> @vfnmacc_vf_nxv2f64_unmasked(<vscale x 2 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vbext, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vbext, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -989,13 +961,11 @@ define <vscale x 4 x double> @vfnmacc_vv_nxv4f64_unmasked(<vscale x 4 x float> %
 ; CHECK-NEXT:    vfwnmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %bext, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %bext, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1043,13 +1013,11 @@ define <vscale x 4 x double> @vfnmacc_vf_nxv4f64_unmasked(<vscale x 4 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vbext, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vbext, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1079,13 +1047,11 @@ define <vscale x 8 x double> @vfnmacc_vv_nxv8f64_unmasked(<vscale x 8 x float> %
 ; CHECK-NEXT:    vfwnmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %bext, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %bext, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1133,13 +1099,11 @@ define <vscale x 8 x double> @vfnmacc_vf_nxv8f64_unmasked(<vscale x 8 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vbext, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vbext, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1173,13 +1137,11 @@ define <vscale x 1 x double> @vfnmacc_vv_nxv1f64_nxv1f16_unmasked(<vscale x 1 x
 ; CHECK-NEXT:    vfwnmacc.vv v10, v11, v8
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %bext, <vscale x 1 x double> %negc, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %bext, <vscale x 1 x double> %negc, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1213,13 +1175,11 @@ define <vscale x 2 x double> @vfnmacc_vv_nxv2f64_nxv2f16_unmasked(<vscale x 2 x
 ; CHECK-NEXT:    vfwnmacc.vv v10, v12, v8
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %bext, <vscale x 2 x double> %negc, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %bext, <vscale x 2 x double> %negc, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1253,13 +1213,11 @@ define <vscale x 4 x double> @vfnmacc_vv_nxv4f64_nxv4f16_unmasked(<vscale x 4 x
 ; CHECK-NEXT:    vfwnmacc.vv v12, v10, v16
 ; CHECK-NEXT:    vmv4r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %bext, <vscale x 4 x double> %negc, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %bext, <vscale x 4 x double> %negc, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1293,12 +1251,10 @@ define <vscale x 8 x double> @vfnmacc_vv_nxv8f64_nxv4f16_unmasked(<vscale x 8 x
 ; CHECK-NEXT:    vfwnmacc.vv v16, v12, v24
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %bext, <vscale x 8 x double> %negc, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %negc = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %bext, <vscale x 8 x double> %negc, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
index a8cc0ce92aa16e..2797ca2eb31630 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
@@ -51,12 +51,10 @@ define <vscale x 1 x float> @vfnmsac_vv_nxv1f32_unmasked(<vscale x 1 x half> %a,
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfnmsub.vv v8, v11, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -144,12 +142,10 @@ define <vscale x 1 x float> @vfnmsac_vf_nxv1f32_unmasked(<vscale x 1 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vbext, <vscale x 1 x float> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vbext, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -196,12 +192,10 @@ define <vscale x 2 x float> @vfnmsac_vv_nxv2f32_unmasked(<vscale x 2 x half> %a,
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfnmsub.vv v8, v11, v10
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %bext, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %bext, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -289,12 +283,10 @@ define <vscale x 2 x float> @vfnmsac_vf_nxv2f32_unmasked(<vscale x 2 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vbext, <vscale x 2 x float> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vbext, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -343,12 +335,10 @@ define <vscale x 4 x float> @vfnmsac_vv_nxv4f32_unmasked(<vscale x 4 x half> %a,
 ; ZVFHMIN-NEXT:    vfnmsub.vv v12, v14, v10
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %bext, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %bext, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -436,12 +426,10 @@ define <vscale x 4 x float> @vfnmsac_vf_nxv4f32_unmasked(<vscale x 4 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vbext, <vscale x 4 x float> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vbext, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -490,12 +478,10 @@ define <vscale x 8 x float> @vfnmsac_vv_nxv8f32_unmasked(<vscale x 8 x half> %a,
 ; ZVFHMIN-NEXT:    vfnmsub.vv v16, v20, v12
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %bext, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %bext, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -583,12 +569,10 @@ define <vscale x 8 x float> @vfnmsac_vf_nxv8f32_unmasked(<vscale x 8 x half> %a,
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vbext, <vscale x 8 x float> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vbext, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -651,12 +635,10 @@ define <vscale x 16 x float> @vfnmsac_vv_nxv16f32_unmasked(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    vfnmsub.vv v24, v0, v16
 ; ZVFHMIN-NEXT:    vmv.v.v v8, v24
 ; ZVFHMIN-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %bext, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %bext, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -744,12 +726,10 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_unmasked(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> %allones, i32 %evl)
-  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vbext, <vscale x 16 x float> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vbext, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -778,12 +758,10 @@ define <vscale x 1 x double> @vfnmsac_vv_nxv1f64_unmasked(<vscale x 1 x float> %
 ; CHECK-NEXT:    vfwnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %bext, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %bext, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -828,12 +806,10 @@ define <vscale x 1 x double> @vfnmsac_vf_nxv1f64_unmasked(<vscale x 1 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vbext, <vscale x 1 x double> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vbext, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -862,12 +838,10 @@ define <vscale x 2 x double> @vfnmsac_vv_nxv2f64_unmasked(<vscale x 2 x float> %
 ; CHECK-NEXT:    vfwnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %bext, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %bext, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -912,12 +886,10 @@ define <vscale x 2 x double> @vfnmsac_vf_nxv2f64_unmasked(<vscale x 2 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> %allones, i32 %evl)
-  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vbext, <vscale x 2 x double> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vbext, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -946,12 +918,10 @@ define <vscale x 4 x double> @vfnmsac_vv_nxv4f64_unmasked(<vscale x 4 x float> %
 ; CHECK-NEXT:    vfwnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %bext, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %bext, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -996,12 +966,10 @@ define <vscale x 4 x double> @vfnmsac_vf_nxv4f64_unmasked(<vscale x 4 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> %allones, i32 %evl)
-  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vbext, <vscale x 4 x double> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vbext, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1030,12 +998,10 @@ define <vscale x 8 x double> @vfnmsac_vv_nxv8f64_unmasked(<vscale x 8 x float> %
 ; CHECK-NEXT:    vfwnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %bext, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %bext, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1080,11 +1046,9 @@ define <vscale x 8 x double> @vfnmsac_vf_nxv8f64_unmasked(<vscale x 8 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %allones, i32 %evl)
-  %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> %allones, i32 %evl)
-  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vbext, <vscale x 8 x double> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vbext, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
index 797bd4125f481a..5aba7ef4cc5b68 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
@@ -16,10 +16,8 @@ define <vscale x 1 x i8> @vmacc_vv_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -31,11 +29,9 @@ define <vscale x 1 x i8> @vmacc_vv_nxv1i8_unmasked(<vscale x 1 x i8> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
 
@@ -48,10 +44,8 @@ define <vscale x 1 x i8> @vmacc_vx_nxv1i8(<vscale x 1 x i8> %a, i8 %b, <vscale x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -65,11 +59,9 @@ define <vscale x 1 x i8> @vmacc_vx_nxv1i8_unmasked(<vscale x 1 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
 
@@ -80,10 +72,8 @@ define <vscale x 1 x i8> @vmacc_vv_nxv1i8_ta(<vscale x 1 x i8> %a, <vscale x 1 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -97,10 +87,8 @@ define <vscale x 1 x i8> @vmacc_vx_nxv1i8_ta(<vscale x 1 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -117,10 +105,8 @@ define <vscale x 2 x i8> @vmacc_vv_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -132,11 +118,9 @@ define <vscale x 2 x i8> @vmacc_vv_nxv2i8_unmasked(<vscale x 2 x i8> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
 
@@ -149,10 +133,8 @@ define <vscale x 2 x i8> @vmacc_vx_nxv2i8(<vscale x 2 x i8> %a, i8 %b, <vscale x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -166,11 +148,9 @@ define <vscale x 2 x i8> @vmacc_vx_nxv2i8_unmasked(<vscale x 2 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
 
@@ -181,10 +161,8 @@ define <vscale x 2 x i8> @vmacc_vv_nxv2i8_ta(<vscale x 2 x i8> %a, <vscale x 2 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -198,10 +176,8 @@ define <vscale x 2 x i8> @vmacc_vx_nxv2i8_ta(<vscale x 2 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -218,10 +194,8 @@ define <vscale x 4 x i8> @vmacc_vv_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -233,11 +207,9 @@ define <vscale x 4 x i8> @vmacc_vv_nxv4i8_unmasked(<vscale x 4 x i8> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
 
@@ -250,10 +222,8 @@ define <vscale x 4 x i8> @vmacc_vx_nxv4i8(<vscale x 4 x i8> %a, i8 %b, <vscale x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -267,11 +237,9 @@ define <vscale x 4 x i8> @vmacc_vx_nxv4i8_unmasked(<vscale x 4 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
 
@@ -282,10 +250,8 @@ define <vscale x 4 x i8> @vmacc_vv_nxv4i8_ta(<vscale x 4 x i8> %a, <vscale x 4 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -299,10 +265,8 @@ define <vscale x 4 x i8> @vmacc_vx_nxv4i8_ta(<vscale x 4 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -319,10 +283,8 @@ define <vscale x 8 x i8> @vmacc_vv_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -334,11 +296,9 @@ define <vscale x 8 x i8> @vmacc_vv_nxv8i8_unmasked(<vscale x 8 x i8> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
 
@@ -351,10 +311,8 @@ define <vscale x 8 x i8> @vmacc_vx_nxv8i8(<vscale x 8 x i8> %a, i8 %b, <vscale x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -368,11 +326,9 @@ define <vscale x 8 x i8> @vmacc_vx_nxv8i8_unmasked(<vscale x 8 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
 
@@ -383,10 +339,8 @@ define <vscale x 8 x i8> @vmacc_vv_nxv8i8_ta(<vscale x 8 x i8> %a, <vscale x 8 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -400,10 +354,8 @@ define <vscale x 8 x i8> @vmacc_vx_nxv8i8_ta(<vscale x 8 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -420,10 +372,8 @@ define <vscale x 16 x i8> @vmacc_vv_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -435,11 +385,9 @@ define <vscale x 16 x i8> @vmacc_vv_nxv16i8_unmasked(<vscale x 16 x i8> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %allones, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
 
@@ -452,10 +400,8 @@ define <vscale x 16 x i8> @vmacc_vx_nxv16i8(<vscale x 16 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -469,11 +415,9 @@ define <vscale x 16 x i8> @vmacc_vx_nxv16i8_unmasked(<vscale x 16 x i8> %a, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %allones, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
 
@@ -484,10 +428,8 @@ define <vscale x 16 x i8> @vmacc_vv_nxv16i8_ta(<vscale x 16 x i8> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -501,10 +443,8 @@ define <vscale x 16 x i8> @vmacc_vx_nxv16i8_ta(<vscale x 16 x i8> %a, i8 %b, <vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -521,10 +461,8 @@ define <vscale x 32 x i8> @vmacc_vv_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -536,11 +474,9 @@ define <vscale x 32 x i8> @vmacc_vv_nxv32i8_unmasked(<vscale x 32 x i8> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %allones, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
 
@@ -553,10 +489,8 @@ define <vscale x 32 x i8> @vmacc_vx_nxv32i8(<vscale x 32 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -570,11 +504,9 @@ define <vscale x 32 x i8> @vmacc_vx_nxv32i8_unmasked(<vscale x 32 x i8> %a, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %allones, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
 
@@ -585,10 +517,8 @@ define <vscale x 32 x i8> @vmacc_vv_nxv32i8_ta(<vscale x 32 x i8> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -602,10 +532,8 @@ define <vscale x 32 x i8> @vmacc_vx_nxv32i8_ta(<vscale x 32 x i8> %a, i8 %b, <vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -623,10 +551,8 @@ define <vscale x 64 x i8> @vmacc_vv_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -639,11 +565,9 @@ define <vscale x 64 x i8> @vmacc_vv_nxv64i8_unmasked(<vscale x 64 x i8> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
-  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %allones, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> splat (i1 -1), <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
 
@@ -656,10 +580,8 @@ define <vscale x 64 x i8> @vmacc_vx_nxv64i8(<vscale x 64 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -673,11 +595,9 @@ define <vscale x 64 x i8> @vmacc_vx_nxv64i8_unmasked(<vscale x 64 x i8> %a, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
-  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %allones, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> splat (i1 -1), <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
 
@@ -689,10 +609,8 @@ define <vscale x 64 x i8> @vmacc_vv_nxv64i8_ta(<vscale x 64 x i8> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -706,10 +624,8 @@ define <vscale x 64 x i8> @vmacc_vx_nxv64i8_ta(<vscale x 64 x i8> %a, i8 %b, <vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -726,10 +642,8 @@ define <vscale x 1 x i16> @vmacc_vv_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -741,11 +655,9 @@ define <vscale x 1 x i16> @vmacc_vv_nxv1i16_unmasked(<vscale x 1 x i16> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %allones, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
 
@@ -758,10 +670,8 @@ define <vscale x 1 x i16> @vmacc_vx_nxv1i16(<vscale x 1 x i16> %a, i16 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -775,11 +685,9 @@ define <vscale x 1 x i16> @vmacc_vx_nxv1i16_unmasked(<vscale x 1 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %allones, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
 
@@ -790,10 +698,8 @@ define <vscale x 1 x i16> @vmacc_vv_nxv1i16_ta(<vscale x 1 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -807,10 +713,8 @@ define <vscale x 1 x i16> @vmacc_vx_nxv1i16_ta(<vscale x 1 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -827,10 +731,8 @@ define <vscale x 2 x i16> @vmacc_vv_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -842,11 +744,9 @@ define <vscale x 2 x i16> @vmacc_vv_nxv2i16_unmasked(<vscale x 2 x i16> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %allones, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
 
@@ -859,10 +759,8 @@ define <vscale x 2 x i16> @vmacc_vx_nxv2i16(<vscale x 2 x i16> %a, i16 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -876,11 +774,9 @@ define <vscale x 2 x i16> @vmacc_vx_nxv2i16_unmasked(<vscale x 2 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %allones, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
 
@@ -891,10 +787,8 @@ define <vscale x 2 x i16> @vmacc_vv_nxv2i16_ta(<vscale x 2 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -908,10 +802,8 @@ define <vscale x 2 x i16> @vmacc_vx_nxv2i16_ta(<vscale x 2 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -928,10 +820,8 @@ define <vscale x 4 x i16> @vmacc_vv_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -943,11 +833,9 @@ define <vscale x 4 x i16> @vmacc_vv_nxv4i16_unmasked(<vscale x 4 x i16> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %allones, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
 
@@ -960,10 +848,8 @@ define <vscale x 4 x i16> @vmacc_vx_nxv4i16(<vscale x 4 x i16> %a, i16 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -977,11 +863,9 @@ define <vscale x 4 x i16> @vmacc_vx_nxv4i16_unmasked(<vscale x 4 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %allones, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
 
@@ -992,10 +876,8 @@ define <vscale x 4 x i16> @vmacc_vv_nxv4i16_ta(<vscale x 4 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -1009,10 +891,8 @@ define <vscale x 4 x i16> @vmacc_vx_nxv4i16_ta(<vscale x 4 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -1029,10 +909,8 @@ define <vscale x 8 x i16> @vmacc_vv_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1044,11 +922,9 @@ define <vscale x 8 x i16> @vmacc_vv_nxv8i16_unmasked(<vscale x 8 x i16> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
 
@@ -1061,10 +937,8 @@ define <vscale x 8 x i16> @vmacc_vx_nxv8i16(<vscale x 8 x i16> %a, i16 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1078,11 +952,9 @@ define <vscale x 8 x i16> @vmacc_vx_nxv8i16_unmasked(<vscale x 8 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
 
@@ -1093,10 +965,8 @@ define <vscale x 8 x i16> @vmacc_vv_nxv8i16_ta(<vscale x 8 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1110,10 +980,8 @@ define <vscale x 8 x i16> @vmacc_vx_nxv8i16_ta(<vscale x 8 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1130,10 +998,8 @@ define <vscale x 16 x i16> @vmacc_vv_nxv16i16(<vscale x 16 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1145,11 +1011,9 @@ define <vscale x 16 x i16> @vmacc_vv_nxv16i16_unmasked(<vscale x 16 x i16> %a, <
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %allones, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
 
@@ -1162,10 +1026,8 @@ define <vscale x 16 x i16> @vmacc_vx_nxv16i16(<vscale x 16 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1179,11 +1041,9 @@ define <vscale x 16 x i16> @vmacc_vx_nxv16i16_unmasked(<vscale x 16 x i16> %a, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %allones, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
 
@@ -1194,10 +1054,8 @@ define <vscale x 16 x i16> @vmacc_vv_nxv16i16_ta(<vscale x 16 x i16> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1211,10 +1069,8 @@ define <vscale x 16 x i16> @vmacc_vx_nxv16i16_ta(<vscale x 16 x i16> %a, i16 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1232,10 +1088,8 @@ define <vscale x 32 x i16> @vmacc_vv_nxv32i16(<vscale x 32 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1248,11 +1102,9 @@ define <vscale x 32 x i16> @vmacc_vv_nxv32i16_unmasked(<vscale x 32 x i16> %a, <
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %allones, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
 
@@ -1265,10 +1117,8 @@ define <vscale x 32 x i16> @vmacc_vx_nxv32i16(<vscale x 32 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1282,11 +1132,9 @@ define <vscale x 32 x i16> @vmacc_vx_nxv32i16_unmasked(<vscale x 32 x i16> %a, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %allones, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
 
@@ -1298,10 +1146,8 @@ define <vscale x 32 x i16> @vmacc_vv_nxv32i16_ta(<vscale x 32 x i16> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1315,10 +1161,8 @@ define <vscale x 32 x i16> @vmacc_vx_nxv32i16_ta(<vscale x 32 x i16> %a, i16 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1335,10 +1179,8 @@ define <vscale x 1 x i32> @vmacc_vv_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1350,11 +1192,9 @@ define <vscale x 1 x i32> @vmacc_vv_nxv1i32_unmasked(<vscale x 1 x i32> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
 
@@ -1367,10 +1207,8 @@ define <vscale x 1 x i32> @vmacc_vx_nxv1i32(<vscale x 1 x i32> %a, i32 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1384,11 +1222,9 @@ define <vscale x 1 x i32> @vmacc_vx_nxv1i32_unmasked(<vscale x 1 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
 
@@ -1399,10 +1235,8 @@ define <vscale x 1 x i32> @vmacc_vv_nxv1i32_ta(<vscale x 1 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1416,10 +1250,8 @@ define <vscale x 1 x i32> @vmacc_vx_nxv1i32_ta(<vscale x 1 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1436,10 +1268,8 @@ define <vscale x 2 x i32> @vmacc_vv_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1451,11 +1281,9 @@ define <vscale x 2 x i32> @vmacc_vv_nxv2i32_unmasked(<vscale x 2 x i32> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %allones, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
 
@@ -1468,10 +1296,8 @@ define <vscale x 2 x i32> @vmacc_vx_nxv2i32(<vscale x 2 x i32> %a, i32 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1485,11 +1311,9 @@ define <vscale x 2 x i32> @vmacc_vx_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %allones, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
 
@@ -1500,10 +1324,8 @@ define <vscale x 2 x i32> @vmacc_vv_nxv2i32_ta(<vscale x 2 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1517,10 +1339,8 @@ define <vscale x 2 x i32> @vmacc_vx_nxv2i32_ta(<vscale x 2 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1537,10 +1357,8 @@ define <vscale x 4 x i32> @vmacc_vv_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1552,11 +1370,9 @@ define <vscale x 4 x i32> @vmacc_vv_nxv4i32_unmasked(<vscale x 4 x i32> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %allones, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
 
@@ -1569,10 +1385,8 @@ define <vscale x 4 x i32> @vmacc_vx_nxv4i32(<vscale x 4 x i32> %a, i32 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1586,11 +1400,9 @@ define <vscale x 4 x i32> @vmacc_vx_nxv4i32_unmasked(<vscale x 4 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %allones, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
 
@@ -1601,10 +1413,8 @@ define <vscale x 4 x i32> @vmacc_vv_nxv4i32_ta(<vscale x 4 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1618,10 +1428,8 @@ define <vscale x 4 x i32> @vmacc_vx_nxv4i32_ta(<vscale x 4 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1638,10 +1446,8 @@ define <vscale x 8 x i32> @vmacc_vv_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1653,11 +1459,9 @@ define <vscale x 8 x i32> @vmacc_vv_nxv8i32_unmasked(<vscale x 8 x i32> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
 
@@ -1670,10 +1474,8 @@ define <vscale x 8 x i32> @vmacc_vx_nxv8i32(<vscale x 8 x i32> %a, i32 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1687,11 +1489,9 @@ define <vscale x 8 x i32> @vmacc_vx_nxv8i32_unmasked(<vscale x 8 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
 
@@ -1702,10 +1502,8 @@ define <vscale x 8 x i32> @vmacc_vv_nxv8i32_ta(<vscale x 8 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1719,10 +1517,8 @@ define <vscale x 8 x i32> @vmacc_vx_nxv8i32_ta(<vscale x 8 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1740,10 +1536,8 @@ define <vscale x 16 x i32> @vmacc_vv_nxv16i32(<vscale x 16 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1756,11 +1550,9 @@ define <vscale x 16 x i32> @vmacc_vv_nxv16i32_unmasked(<vscale x 16 x i32> %a, <
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %allones, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
 
@@ -1773,10 +1565,8 @@ define <vscale x 16 x i32> @vmacc_vx_nxv16i32(<vscale x 16 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1790,11 +1580,9 @@ define <vscale x 16 x i32> @vmacc_vx_nxv16i32_unmasked(<vscale x 16 x i32> %a, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %allones, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
 
@@ -1806,10 +1594,8 @@ define <vscale x 16 x i32> @vmacc_vv_nxv16i32_ta(<vscale x 16 x i32> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1823,10 +1609,8 @@ define <vscale x 16 x i32> @vmacc_vx_nxv16i32_ta(<vscale x 16 x i32> %a, i32 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1843,10 +1627,8 @@ define <vscale x 1 x i64> @vmacc_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1858,11 +1640,9 @@ define <vscale x 1 x i64> @vmacc_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %allones, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
 
@@ -1890,10 +1670,8 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vsca
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1922,11 +1700,9 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %allones, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
 
@@ -1937,10 +1713,8 @@ define <vscale x 1 x i64> @vmacc_vv_nxv1i64_ta(<vscale x 1 x i64> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1969,10 +1743,8 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <v
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1989,10 +1761,8 @@ define <vscale x 2 x i64> @vmacc_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2004,11 +1774,9 @@ define <vscale x 2 x i64> @vmacc_vv_nxv2i64_unmasked(<vscale x 2 x i64> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %allones, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
 
@@ -2036,10 +1804,8 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vsca
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2068,11 +1834,9 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %allones, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
 
@@ -2083,10 +1847,8 @@ define <vscale x 2 x i64> @vmacc_vv_nxv2i64_ta(<vscale x 2 x i64> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2115,10 +1877,8 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <v
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2135,10 +1895,8 @@ define <vscale x 4 x i64> @vmacc_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2150,11 +1908,9 @@ define <vscale x 4 x i64> @vmacc_vv_nxv4i64_unmasked(<vscale x 4 x i64> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %allones, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
 
@@ -2182,10 +1938,8 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vsca
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2214,11 +1968,9 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %allones, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
 
@@ -2229,10 +1981,8 @@ define <vscale x 4 x i64> @vmacc_vv_nxv4i64_ta(<vscale x 4 x i64> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2261,10 +2011,8 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <v
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2282,10 +2030,8 @@ define <vscale x 8 x i64> @vmacc_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2298,11 +2044,9 @@ define <vscale x 8 x i64> @vmacc_vv_nxv8i64_unmasked(<vscale x 8 x i64> %a, <vsc
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
 
@@ -2330,10 +2074,8 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vsca
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2362,11 +2104,9 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
 
@@ -2378,10 +2118,8 @@ define <vscale x 8 x i64> @vmacc_vv_nxv8i64_ta(<vscale x 8 x i64> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2410,10 +2148,8 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <v
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll
index 6a6d7d2a41424c..0322c1ab9f6310 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-vp.ll
@@ -17,10 +17,8 @@ define <vscale x 1 x i8> @vmadd_vv_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -33,11 +31,9 @@ define <vscale x 1 x i8> @vmadd_vv_nxv1i8_unmasked(<vscale x 1 x i8> %a, <vscale
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
 
@@ -49,10 +45,8 @@ define <vscale x 1 x i8> @vmadd_vx_nxv1i8(<vscale x 1 x i8> %a, i8 %b, <vscale x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -65,11 +59,9 @@ define <vscale x 1 x i8> @vmadd_vx_nxv1i8_unmasked(<vscale x 1 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
 
@@ -80,10 +72,8 @@ define <vscale x 1 x i8> @vmadd_vv_nxv1i8_ta(<vscale x 1 x i8> %a, <vscale x 1 x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -97,10 +87,8 @@ define <vscale x 1 x i8> @vmadd_vx_nxv1i8_ta(<vscale x 1 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -118,10 +106,8 @@ define <vscale x 2 x i8> @vmadd_vv_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -134,11 +120,9 @@ define <vscale x 2 x i8> @vmadd_vv_nxv2i8_unmasked(<vscale x 2 x i8> %a, <vscale
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
 
@@ -150,10 +134,8 @@ define <vscale x 2 x i8> @vmadd_vx_nxv2i8(<vscale x 2 x i8> %a, i8 %b, <vscale x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -166,11 +148,9 @@ define <vscale x 2 x i8> @vmadd_vx_nxv2i8_unmasked(<vscale x 2 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
 
@@ -181,10 +161,8 @@ define <vscale x 2 x i8> @vmadd_vv_nxv2i8_ta(<vscale x 2 x i8> %a, <vscale x 2 x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -198,10 +176,8 @@ define <vscale x 2 x i8> @vmadd_vx_nxv2i8_ta(<vscale x 2 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -219,10 +195,8 @@ define <vscale x 4 x i8> @vmadd_vv_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -235,11 +209,9 @@ define <vscale x 4 x i8> @vmadd_vv_nxv4i8_unmasked(<vscale x 4 x i8> %a, <vscale
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
 
@@ -251,10 +223,8 @@ define <vscale x 4 x i8> @vmadd_vx_nxv4i8(<vscale x 4 x i8> %a, i8 %b, <vscale x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -267,11 +237,9 @@ define <vscale x 4 x i8> @vmadd_vx_nxv4i8_unmasked(<vscale x 4 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
 
@@ -282,10 +250,8 @@ define <vscale x 4 x i8> @vmadd_vv_nxv4i8_ta(<vscale x 4 x i8> %a, <vscale x 4 x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -299,10 +265,8 @@ define <vscale x 4 x i8> @vmadd_vx_nxv4i8_ta(<vscale x 4 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -320,10 +284,8 @@ define <vscale x 8 x i8> @vmadd_vv_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -336,11 +298,9 @@ define <vscale x 8 x i8> @vmadd_vv_nxv8i8_unmasked(<vscale x 8 x i8> %a, <vscale
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
 
@@ -352,10 +312,8 @@ define <vscale x 8 x i8> @vmadd_vx_nxv8i8(<vscale x 8 x i8> %a, i8 %b, <vscale x
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -368,11 +326,9 @@ define <vscale x 8 x i8> @vmadd_vx_nxv8i8_unmasked(<vscale x 8 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
 
@@ -383,10 +339,8 @@ define <vscale x 8 x i8> @vmadd_vv_nxv8i8_ta(<vscale x 8 x i8> %a, <vscale x 8 x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -400,10 +354,8 @@ define <vscale x 8 x i8> @vmadd_vx_nxv8i8_ta(<vscale x 8 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -421,10 +373,8 @@ define <vscale x 16 x i8> @vmadd_vv_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -437,11 +387,9 @@ define <vscale x 16 x i8> @vmadd_vv_nxv16i8_unmasked(<vscale x 16 x i8> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %allones, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
 
@@ -453,10 +401,8 @@ define <vscale x 16 x i8> @vmadd_vx_nxv16i8(<vscale x 16 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -469,11 +415,9 @@ define <vscale x 16 x i8> @vmadd_vx_nxv16i8_unmasked(<vscale x 16 x i8> %a, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %allones, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
 
@@ -484,10 +428,8 @@ define <vscale x 16 x i8> @vmadd_vv_nxv16i8_ta(<vscale x 16 x i8> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v10, v8, v12
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -501,10 +443,8 @@ define <vscale x 16 x i8> @vmadd_vx_nxv16i8_ta(<vscale x 16 x i8> %a, i8 %b, <vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -522,10 +462,8 @@ define <vscale x 32 x i8> @vmadd_vv_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -538,11 +476,9 @@ define <vscale x 32 x i8> @vmadd_vv_nxv32i8_unmasked(<vscale x 32 x i8> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %allones, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
 
@@ -554,10 +490,8 @@ define <vscale x 32 x i8> @vmadd_vx_nxv32i8(<vscale x 32 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -570,11 +504,9 @@ define <vscale x 32 x i8> @vmadd_vx_nxv32i8_unmasked(<vscale x 32 x i8> %a, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %allones, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
 
@@ -585,10 +517,8 @@ define <vscale x 32 x i8> @vmadd_vv_nxv32i8_ta(<vscale x 32 x i8> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v12, v8, v16
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -602,10 +532,8 @@ define <vscale x 32 x i8> @vmadd_vx_nxv32i8_ta(<vscale x 32 x i8> %a, i8 %b, <vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -624,10 +552,8 @@ define <vscale x 64 x i8> @vmadd_vv_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -641,11 +567,9 @@ define <vscale x 64 x i8> @vmadd_vv_nxv64i8_unmasked(<vscale x 64 x i8> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m8, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
-  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %allones, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> splat (i1 -1), <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
 
@@ -657,10 +581,8 @@ define <vscale x 64 x i8> @vmadd_vx_nxv64i8(<vscale x 64 x i8> %a, i8 %b, <vscal
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -673,11 +595,9 @@ define <vscale x 64 x i8> @vmadd_vx_nxv64i8_unmasked(<vscale x 64 x i8> %a, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
-  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %allones, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> splat (i1 -1), <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
 
@@ -689,10 +609,8 @@ define <vscale x 64 x i8> @vmadd_vv_nxv64i8_ta(<vscale x 64 x i8> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -706,10 +624,8 @@ define <vscale x 64 x i8> @vmadd_vx_nxv64i8_ta(<vscale x 64 x i8> %a, i8 %b, <vs
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -727,10 +643,8 @@ define <vscale x 1 x i16> @vmadd_vv_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -743,11 +657,9 @@ define <vscale x 1 x i16> @vmadd_vv_nxv1i16_unmasked(<vscale x 1 x i16> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %allones, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
 
@@ -759,10 +671,8 @@ define <vscale x 1 x i16> @vmadd_vx_nxv1i16(<vscale x 1 x i16> %a, i16 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -775,11 +685,9 @@ define <vscale x 1 x i16> @vmadd_vx_nxv1i16_unmasked(<vscale x 1 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %allones, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
 
@@ -790,10 +698,8 @@ define <vscale x 1 x i16> @vmadd_vv_nxv1i16_ta(<vscale x 1 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -807,10 +713,8 @@ define <vscale x 1 x i16> @vmadd_vx_nxv1i16_ta(<vscale x 1 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -828,10 +732,8 @@ define <vscale x 2 x i16> @vmadd_vv_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -844,11 +746,9 @@ define <vscale x 2 x i16> @vmadd_vv_nxv2i16_unmasked(<vscale x 2 x i16> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %allones, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
 
@@ -860,10 +760,8 @@ define <vscale x 2 x i16> @vmadd_vx_nxv2i16(<vscale x 2 x i16> %a, i16 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -876,11 +774,9 @@ define <vscale x 2 x i16> @vmadd_vx_nxv2i16_unmasked(<vscale x 2 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %allones, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
 
@@ -891,10 +787,8 @@ define <vscale x 2 x i16> @vmadd_vv_nxv2i16_ta(<vscale x 2 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -908,10 +802,8 @@ define <vscale x 2 x i16> @vmadd_vx_nxv2i16_ta(<vscale x 2 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -929,10 +821,8 @@ define <vscale x 4 x i16> @vmadd_vv_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -945,11 +835,9 @@ define <vscale x 4 x i16> @vmadd_vv_nxv4i16_unmasked(<vscale x 4 x i16> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %allones, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
 
@@ -961,10 +849,8 @@ define <vscale x 4 x i16> @vmadd_vx_nxv4i16(<vscale x 4 x i16> %a, i16 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -977,11 +863,9 @@ define <vscale x 4 x i16> @vmadd_vx_nxv4i16_unmasked(<vscale x 4 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %allones, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
 
@@ -992,10 +876,8 @@ define <vscale x 4 x i16> @vmadd_vv_nxv4i16_ta(<vscale x 4 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -1009,10 +891,8 @@ define <vscale x 4 x i16> @vmadd_vx_nxv4i16_ta(<vscale x 4 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -1030,10 +910,8 @@ define <vscale x 8 x i16> @vmadd_vv_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1046,11 +924,9 @@ define <vscale x 8 x i16> @vmadd_vv_nxv8i16_unmasked(<vscale x 8 x i16> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
 
@@ -1062,10 +938,8 @@ define <vscale x 8 x i16> @vmadd_vx_nxv8i16(<vscale x 8 x i16> %a, i16 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1078,11 +952,9 @@ define <vscale x 8 x i16> @vmadd_vx_nxv8i16_unmasked(<vscale x 8 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
 
@@ -1093,10 +965,8 @@ define <vscale x 8 x i16> @vmadd_vv_nxv8i16_ta(<vscale x 8 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v10, v8, v12
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1110,10 +980,8 @@ define <vscale x 8 x i16> @vmadd_vx_nxv8i16_ta(<vscale x 8 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1131,10 +999,8 @@ define <vscale x 16 x i16> @vmadd_vv_nxv16i16(<vscale x 16 x i16> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1147,11 +1013,9 @@ define <vscale x 16 x i16> @vmadd_vv_nxv16i16_unmasked(<vscale x 16 x i16> %a, <
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %allones, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
 
@@ -1163,10 +1027,8 @@ define <vscale x 16 x i16> @vmadd_vx_nxv16i16(<vscale x 16 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1179,11 +1041,9 @@ define <vscale x 16 x i16> @vmadd_vx_nxv16i16_unmasked(<vscale x 16 x i16> %a, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %allones, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
 
@@ -1194,10 +1054,8 @@ define <vscale x 16 x i16> @vmadd_vv_nxv16i16_ta(<vscale x 16 x i16> %a, <vscale
 ; CHECK-NEXT:    vmadd.vv v12, v8, v16
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1211,10 +1069,8 @@ define <vscale x 16 x i16> @vmadd_vx_nxv16i16_ta(<vscale x 16 x i16> %a, i16 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1233,10 +1089,8 @@ define <vscale x 32 x i16> @vmadd_vv_nxv32i16(<vscale x 32 x i16> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1250,11 +1104,9 @@ define <vscale x 32 x i16> @vmadd_vv_nxv32i16_unmasked(<vscale x 32 x i16> %a, <
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %allones, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
 
@@ -1266,10 +1118,8 @@ define <vscale x 32 x i16> @vmadd_vx_nxv32i16(<vscale x 32 x i16> %a, i16 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1282,11 +1132,9 @@ define <vscale x 32 x i16> @vmadd_vx_nxv32i16_unmasked(<vscale x 32 x i16> %a, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %allones, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
 
@@ -1298,10 +1146,8 @@ define <vscale x 32 x i16> @vmadd_vv_nxv32i16_ta(<vscale x 32 x i16> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1315,10 +1161,8 @@ define <vscale x 32 x i16> @vmadd_vx_nxv32i16_ta(<vscale x 32 x i16> %a, i16 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1336,10 +1180,8 @@ define <vscale x 1 x i32> @vmadd_vv_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1352,11 +1194,9 @@ define <vscale x 1 x i32> @vmadd_vv_nxv1i32_unmasked(<vscale x 1 x i32> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
 
@@ -1368,10 +1208,8 @@ define <vscale x 1 x i32> @vmadd_vx_nxv1i32(<vscale x 1 x i32> %a, i32 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1384,11 +1222,9 @@ define <vscale x 1 x i32> @vmadd_vx_nxv1i32_unmasked(<vscale x 1 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
 
@@ -1399,10 +1235,8 @@ define <vscale x 1 x i32> @vmadd_vv_nxv1i32_ta(<vscale x 1 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1416,10 +1250,8 @@ define <vscale x 1 x i32> @vmadd_vx_nxv1i32_ta(<vscale x 1 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1437,10 +1269,8 @@ define <vscale x 2 x i32> @vmadd_vv_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1453,11 +1283,9 @@ define <vscale x 2 x i32> @vmadd_vv_nxv2i32_unmasked(<vscale x 2 x i32> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %allones, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
 
@@ -1469,10 +1297,8 @@ define <vscale x 2 x i32> @vmadd_vx_nxv2i32(<vscale x 2 x i32> %a, i32 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1485,11 +1311,9 @@ define <vscale x 2 x i32> @vmadd_vx_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %allones, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
 
@@ -1500,10 +1324,8 @@ define <vscale x 2 x i32> @vmadd_vv_nxv2i32_ta(<vscale x 2 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1517,10 +1339,8 @@ define <vscale x 2 x i32> @vmadd_vx_nxv2i32_ta(<vscale x 2 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1538,10 +1358,8 @@ define <vscale x 4 x i32> @vmadd_vv_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1554,11 +1372,9 @@ define <vscale x 4 x i32> @vmadd_vv_nxv4i32_unmasked(<vscale x 4 x i32> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %allones, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
 
@@ -1570,10 +1386,8 @@ define <vscale x 4 x i32> @vmadd_vx_nxv4i32(<vscale x 4 x i32> %a, i32 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1586,11 +1400,9 @@ define <vscale x 4 x i32> @vmadd_vx_nxv4i32_unmasked(<vscale x 4 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %allones, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
 
@@ -1601,10 +1413,8 @@ define <vscale x 4 x i32> @vmadd_vv_nxv4i32_ta(<vscale x 4 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v10, v8, v12
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1618,10 +1428,8 @@ define <vscale x 4 x i32> @vmadd_vx_nxv4i32_ta(<vscale x 4 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1639,10 +1447,8 @@ define <vscale x 8 x i32> @vmadd_vv_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1655,11 +1461,9 @@ define <vscale x 8 x i32> @vmadd_vv_nxv8i32_unmasked(<vscale x 8 x i32> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
 
@@ -1671,10 +1475,8 @@ define <vscale x 8 x i32> @vmadd_vx_nxv8i32(<vscale x 8 x i32> %a, i32 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1687,11 +1489,9 @@ define <vscale x 8 x i32> @vmadd_vx_nxv8i32_unmasked(<vscale x 8 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
 
@@ -1702,10 +1502,8 @@ define <vscale x 8 x i32> @vmadd_vv_nxv8i32_ta(<vscale x 8 x i32> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v12, v8, v16
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1719,10 +1517,8 @@ define <vscale x 8 x i32> @vmadd_vx_nxv8i32_ta(<vscale x 8 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1741,10 +1537,8 @@ define <vscale x 16 x i32> @vmadd_vv_nxv16i32(<vscale x 16 x i32> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1758,11 +1552,9 @@ define <vscale x 16 x i32> @vmadd_vv_nxv16i32_unmasked(<vscale x 16 x i32> %a, <
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %allones, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
 
@@ -1774,10 +1566,8 @@ define <vscale x 16 x i32> @vmadd_vx_nxv16i32(<vscale x 16 x i32> %a, i32 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1790,11 +1580,9 @@ define <vscale x 16 x i32> @vmadd_vx_nxv16i32_unmasked(<vscale x 16 x i32> %a, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %allones, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
 
@@ -1806,10 +1594,8 @@ define <vscale x 16 x i32> @vmadd_vv_nxv16i32_ta(<vscale x 16 x i32> %a, <vscale
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1823,10 +1609,8 @@ define <vscale x 16 x i32> @vmadd_vx_nxv16i32_ta(<vscale x 16 x i32> %a, i32 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1844,10 +1628,8 @@ define <vscale x 1 x i64> @vmadd_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1860,11 +1642,9 @@ define <vscale x 1 x i64> @vmadd_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %allones, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
 
@@ -1892,10 +1672,8 @@ define <vscale x 1 x i64> @vmadd_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vsca
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1924,11 +1702,9 @@ define <vscale x 1 x i64> @vmadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %allones, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
 
@@ -1939,10 +1715,8 @@ define <vscale x 1 x i64> @vmadd_vv_nxv1i64_ta(<vscale x 1 x i64> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v9, v8, v10
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1971,10 +1745,8 @@ define <vscale x 1 x i64> @vmadd_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <v
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1992,10 +1764,8 @@ define <vscale x 2 x i64> @vmadd_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2008,11 +1778,9 @@ define <vscale x 2 x i64> @vmadd_vv_nxv2i64_unmasked(<vscale x 2 x i64> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %allones, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
 
@@ -2040,10 +1808,8 @@ define <vscale x 2 x i64> @vmadd_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vsca
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2072,11 +1838,9 @@ define <vscale x 2 x i64> @vmadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %allones, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
 
@@ -2087,10 +1851,8 @@ define <vscale x 2 x i64> @vmadd_vv_nxv2i64_ta(<vscale x 2 x i64> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v10, v8, v12
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2119,10 +1881,8 @@ define <vscale x 2 x i64> @vmadd_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <v
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2140,10 +1900,8 @@ define <vscale x 4 x i64> @vmadd_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2156,11 +1914,9 @@ define <vscale x 4 x i64> @vmadd_vv_nxv4i64_unmasked(<vscale x 4 x i64> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %allones, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
 
@@ -2188,10 +1944,8 @@ define <vscale x 4 x i64> @vmadd_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vsca
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2220,11 +1974,9 @@ define <vscale x 4 x i64> @vmadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %allones, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
 
@@ -2235,10 +1987,8 @@ define <vscale x 4 x i64> @vmadd_vv_nxv4i64_ta(<vscale x 4 x i64> %a, <vscale x
 ; CHECK-NEXT:    vmadd.vv v12, v8, v16
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2267,10 +2017,8 @@ define <vscale x 4 x i64> @vmadd_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <v
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2289,10 +2037,8 @@ define <vscale x 8 x i64> @vmadd_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2306,11 +2052,9 @@ define <vscale x 8 x i64> @vmadd_vv_nxv8i64_unmasked(<vscale x 8 x i64> %a, <vsc
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
 
@@ -2338,10 +2082,8 @@ define <vscale x 8 x i64> @vmadd_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vsca
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2370,11 +2112,9 @@ define <vscale x 8 x i64> @vmadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
 
@@ -2386,10 +2126,8 @@ define <vscale x 8 x i64> @vmadd_vv_nxv8i64_ta(<vscale x 8 x i64> %a, <vscale x
 ; CHECK-NEXT:    vmacc.vv v24, v8, v16
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2418,10 +2156,8 @@ define <vscale x 8 x i64> @vmadd_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <v
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll
index d243c89958c543..91ea33733560b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll
@@ -159,9 +159,7 @@ define <vscale x 1 x i1> @vmnand_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = and <vscale x 1 x i1> %va, %vb
-  %head = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %not = xor <vscale x 1 x i1> %vc, %splat
+  %not = xor <vscale x 1 x i1> %vc, splat (i1 1)
   ret <vscale x 1 x i1> %not
 }
 
@@ -172,9 +170,7 @@ define <vscale x 2 x i1> @vmnand_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = and <vscale x 2 x i1> %va, %vb
-  %head = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %not = xor <vscale x 2 x i1> %vc, %splat
+  %not = xor <vscale x 2 x i1> %vc, splat (i1 1)
   ret <vscale x 2 x i1> %not
 }
 
@@ -185,9 +181,7 @@ define <vscale x 4 x i1> @vmnand_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = and <vscale x 4 x i1> %va, %vb
-  %head = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %vc, %splat
+  %not = xor <vscale x 4 x i1> %vc, splat (i1 1)
   ret <vscale x 4 x i1> %not
 }
 
@@ -198,9 +192,7 @@ define <vscale x 8 x i1> @vmnand_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = and <vscale x 8 x i1> %va, %vb
-  %head = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %not = xor <vscale x 8 x i1> %vc, %splat
+  %not = xor <vscale x 8 x i1> %vc, splat (i1 1)
   ret <vscale x 8 x i1> %not
 }
 
@@ -211,9 +203,7 @@ define <vscale x 16 x i1> @vmnand_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 1
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = and <vscale x 16 x i1> %va, %vb
-  %head = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %not = xor <vscale x 16 x i1> %vc, %splat
+  %not = xor <vscale x 16 x i1> %vc, splat (i1 1)
   ret <vscale x 16 x i1> %not
 }
 
@@ -224,9 +214,7 @@ define <vscale x 1 x i1> @vmnor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = or <vscale x 1 x i1> %va, %vb
-  %head = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %not = xor <vscale x 1 x i1> %vc, %splat
+  %not = xor <vscale x 1 x i1> %vc, splat (i1 1)
   ret <vscale x 1 x i1> %not
 }
 
@@ -237,9 +225,7 @@ define <vscale x 2 x i1> @vmnor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = or <vscale x 2 x i1> %va, %vb
-  %head = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %not = xor <vscale x 2 x i1> %vc, %splat
+  %not = xor <vscale x 2 x i1> %vc, splat (i1 1)
   ret <vscale x 2 x i1> %not
 }
 
@@ -250,9 +236,7 @@ define <vscale x 4 x i1> @vmnor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = or <vscale x 4 x i1> %va, %vb
-  %head = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %vc, %splat
+  %not = xor <vscale x 4 x i1> %vc, splat (i1 1)
   ret <vscale x 4 x i1> %not
 }
 
@@ -263,9 +247,7 @@ define <vscale x 8 x i1> @vmnor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = or <vscale x 8 x i1> %va, %vb
-  %head = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %not = xor <vscale x 8 x i1> %vc, %splat
+  %not = xor <vscale x 8 x i1> %vc, splat (i1 1)
   ret <vscale x 8 x i1> %not
 }
 
@@ -276,9 +258,7 @@ define <vscale x 16 x i1> @vmnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = or <vscale x 16 x i1> %va, %vb
-  %head = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %not = xor <vscale x 16 x i1> %vc, %splat
+  %not = xor <vscale x 16 x i1> %vc, splat (i1 1)
   ret <vscale x 16 x i1> %not
 }
 
@@ -289,9 +269,7 @@ define <vscale x 1 x i1> @vmxnor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = xor <vscale x 1 x i1> %va, %vb
-  %head = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %not = xor <vscale x 1 x i1> %vc, %splat
+  %not = xor <vscale x 1 x i1> %vc, splat (i1 1)
   ret <vscale x 1 x i1> %not
 }
 
@@ -302,9 +280,7 @@ define <vscale x 2 x i1> @vmxnor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = xor <vscale x 2 x i1> %va, %vb
-  %head = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %not = xor <vscale x 2 x i1> %vc, %splat
+  %not = xor <vscale x 2 x i1> %vc, splat (i1 1)
   ret <vscale x 2 x i1> %not
 }
 
@@ -315,9 +291,7 @@ define <vscale x 4 x i1> @vmxnor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = xor <vscale x 4 x i1> %va, %vb
-  %head = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %vc, %splat
+  %not = xor <vscale x 4 x i1> %vc, splat (i1 1)
   ret <vscale x 4 x i1> %not
 }
 
@@ -328,9 +302,7 @@ define <vscale x 8 x i1> @vmxnor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = xor <vscale x 8 x i1> %va, %vb
-  %head = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %not = xor <vscale x 8 x i1> %vc, %splat
+  %not = xor <vscale x 8 x i1> %vc, splat (i1 1)
   ret <vscale x 8 x i1> %not
 }
 
@@ -341,9 +313,7 @@ define <vscale x 16 x i1> @vmxnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 1
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
   %vc = xor <vscale x 16 x i1> %va, %vb
-  %head = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %not = xor <vscale x 16 x i1> %vc, %splat
+  %not = xor <vscale x 16 x i1> %vc, splat (i1 1)
   ret <vscale x 16 x i1> %not
 }
 
@@ -353,9 +323,7 @@ define <vscale x 1 x i1> @vmandn_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmandn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %not = xor <vscale x 1 x i1> %vb, %splat
+  %not = xor <vscale x 1 x i1> %vb, splat (i1 1)
   %vc = and <vscale x 1 x i1> %va, %not
   ret <vscale x 1 x i1> %vc
 }
@@ -366,9 +334,7 @@ define <vscale x 2 x i1> @vmandn_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmandn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %not = xor <vscale x 2 x i1> %vb, %splat
+  %not = xor <vscale x 2 x i1> %vb, splat (i1 1)
   %vc = and <vscale x 2 x i1> %va, %not
   ret <vscale x 2 x i1> %vc
 }
@@ -379,9 +345,7 @@ define <vscale x 4 x i1> @vmandn_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmandn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %vb, %splat
+  %not = xor <vscale x 4 x i1> %vb, splat (i1 1)
   %vc = and <vscale x 4 x i1> %va, %not
   ret <vscale x 4 x i1> %vc
 }
@@ -392,9 +356,7 @@ define <vscale x 8 x i1> @vmandn_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmandn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %not = xor <vscale x 8 x i1> %vb, %splat
+  %not = xor <vscale x 8 x i1> %vb, splat (i1 1)
   %vc = and <vscale x 8 x i1> %va, %not
   ret <vscale x 8 x i1> %vc
 }
@@ -405,9 +367,7 @@ define <vscale x 16 x i1> @vmandn_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmandn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %not = xor <vscale x 16 x i1> %vb, %splat
+  %not = xor <vscale x 16 x i1> %vb, splat (i1 1)
   %vc = and <vscale x 16 x i1> %va, %not
   ret <vscale x 16 x i1> %vc
 }
@@ -418,9 +378,7 @@ define <vscale x 1 x i1> @vmorn_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmorn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %not = xor <vscale x 1 x i1> %vb, %splat
+  %not = xor <vscale x 1 x i1> %vb, splat (i1 1)
   %vc = or <vscale x 1 x i1> %va, %not
   ret <vscale x 1 x i1> %vc
 }
@@ -431,9 +389,7 @@ define <vscale x 2 x i1> @vmorn_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmorn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %not = xor <vscale x 2 x i1> %vb, %splat
+  %not = xor <vscale x 2 x i1> %vb, splat (i1 1)
   %vc = or <vscale x 2 x i1> %va, %not
   ret <vscale x 2 x i1> %vc
 }
@@ -444,9 +400,7 @@ define <vscale x 4 x i1> @vmorn_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmorn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %not = xor <vscale x 4 x i1> %vb, %splat
+  %not = xor <vscale x 4 x i1> %vb, splat (i1 1)
   %vc = or <vscale x 4 x i1> %va, %not
   ret <vscale x 4 x i1> %vc
 }
@@ -457,9 +411,7 @@ define <vscale x 8 x i1> @vmorn_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmorn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %not = xor <vscale x 8 x i1> %vb, %splat
+  %not = xor <vscale x 8 x i1> %vb, splat (i1 1)
   %vc = or <vscale x 8 x i1> %va, %not
   ret <vscale x 8 x i1> %vc
 }
@@ -470,9 +422,7 @@ define <vscale x 16 x i1> @vmorn_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmorn.mm v0, v0, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
-  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %not = xor <vscale x 16 x i1> %vb, %splat
+  %not = xor <vscale x 16 x i1> %vb, splat (i1 1)
   %vc = or <vscale x 16 x i1> %va, %not
   ret <vscale x 16 x i1> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
index 1247f3d29c0999..52720755dd5b5b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
@@ -33,10 +33,8 @@ define <vscale x 1 x i8> @vmax_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 1 x i8> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
+  %cmp = icmp sgt <vscale x 1 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -3)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -71,10 +69,8 @@ define <vscale x 2 x i8> @vmax_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 2 x i8> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
+  %cmp = icmp sgt <vscale x 2 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -3)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -109,10 +105,8 @@ define <vscale x 4 x i8> @vmax_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 4 x i8> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
+  %cmp = icmp sgt <vscale x 4 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -3)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -147,10 +141,8 @@ define <vscale x 8 x i8> @vmax_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 8 x i8> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
+  %cmp = icmp sgt <vscale x 8 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -3)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -185,10 +177,8 @@ define <vscale x 16 x i8> @vmax_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 16 x i8> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
+  %cmp = icmp sgt <vscale x 16 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -3)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -223,10 +213,8 @@ define <vscale x 32 x i8> @vmax_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 32 x i8> %va, %splat
-  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
+  %cmp = icmp sgt <vscale x 32 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -3)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -261,10 +249,8 @@ define <vscale x 64 x i8> @vmax_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 64 x i8> %va, %splat
-  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
+  %cmp = icmp sgt <vscale x 64 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -3)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -299,10 +285,8 @@ define <vscale x 1 x i16> @vmax_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 1 x i16> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
+  %cmp = icmp sgt <vscale x 1 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -3)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -337,10 +321,8 @@ define <vscale x 2 x i16> @vmax_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 2 x i16> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
+  %cmp = icmp sgt <vscale x 2 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -3)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -375,10 +357,8 @@ define <vscale x 4 x i16> @vmax_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 4 x i16> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
+  %cmp = icmp sgt <vscale x 4 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -3)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -413,10 +393,8 @@ define <vscale x 8 x i16> @vmax_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 8 x i16> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
+  %cmp = icmp sgt <vscale x 8 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -3)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -451,10 +429,8 @@ define <vscale x 16 x i16> @vmax_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 16 x i16> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
+  %cmp = icmp sgt <vscale x 16 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -3)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -489,10 +465,8 @@ define <vscale x 32 x i16> @vmax_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 32 x i16> %va, %splat
-  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
+  %cmp = icmp sgt <vscale x 32 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -3)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -527,10 +501,8 @@ define <vscale x 1 x i32> @vmax_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 1 x i32> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
+  %cmp = icmp sgt <vscale x 1 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -3)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -565,10 +537,8 @@ define <vscale x 2 x i32> @vmax_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 2 x i32> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
+  %cmp = icmp sgt <vscale x 2 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -3)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -603,10 +573,8 @@ define <vscale x 4 x i32> @vmax_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 4 x i32> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
+  %cmp = icmp sgt <vscale x 4 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -3)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -641,10 +609,8 @@ define <vscale x 8 x i32> @vmax_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 8 x i32> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
+  %cmp = icmp sgt <vscale x 8 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -3)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -679,10 +645,8 @@ define <vscale x 16 x i32> @vmax_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 16 x i32> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
+  %cmp = icmp sgt <vscale x 16 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -3)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -730,10 +694,8 @@ define <vscale x 1 x i64> @vmax_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 1 x i64> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
+  %cmp = icmp sgt <vscale x 1 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -3)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -781,10 +743,8 @@ define <vscale x 2 x i64> @vmax_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 2 x i64> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
+  %cmp = icmp sgt <vscale x 2 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -3)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -832,10 +792,8 @@ define <vscale x 4 x i64> @vmax_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 4 x i64> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
+  %cmp = icmp sgt <vscale x 4 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -3)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -883,9 +841,7 @@ define <vscale x 8 x i64> @vmax_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp sgt <vscale x 8 x i64> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
+  %cmp = icmp sgt <vscale x 8 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -3)
   ret <vscale x 8 x i64> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index c69f5fdb5b7111..a35fc874065a70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -39,9 +39,7 @@ define <vscale x 1 x i8> @vmax_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -77,9 +75,7 @@ define <vscale x 1 x i8> @vmax_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -101,9 +97,7 @@ define <vscale x 2 x i8> @vmax_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -127,9 +121,7 @@ define <vscale x 2 x i8> @vmax_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -151,9 +143,7 @@ define <vscale x 3 x i8> @vmax_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -177,9 +167,7 @@ define <vscale x 3 x i8> @vmax_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -201,9 +189,7 @@ define <vscale x 4 x i8> @vmax_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -227,9 +213,7 @@ define <vscale x 4 x i8> @vmax_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -251,9 +235,7 @@ define <vscale x 8 x i8> @vmax_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -277,9 +259,7 @@ define <vscale x 8 x i8> @vmax_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -301,9 +281,7 @@ define <vscale x 16 x i8> @vmax_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -327,9 +305,7 @@ define <vscale x 16 x i8> @vmax_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -351,9 +327,7 @@ define <vscale x 32 x i8> @vmax_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -377,9 +351,7 @@ define <vscale x 32 x i8> @vmax_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -401,9 +373,7 @@ define <vscale x 64 x i8> @vmax_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -427,9 +397,7 @@ define <vscale x 64 x i8> @vmax_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -485,9 +453,7 @@ define <vscale x 128 x i8> @vmax_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.smax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.smax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -509,9 +475,7 @@ define <vscale x 1 x i16> @vmax_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -535,9 +499,7 @@ define <vscale x 1 x i16> @vmax_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -559,9 +521,7 @@ define <vscale x 2 x i16> @vmax_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -585,9 +545,7 @@ define <vscale x 2 x i16> @vmax_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -609,9 +567,7 @@ define <vscale x 4 x i16> @vmax_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -635,9 +591,7 @@ define <vscale x 4 x i16> @vmax_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -659,9 +613,7 @@ define <vscale x 8 x i16> @vmax_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -685,9 +637,7 @@ define <vscale x 8 x i16> @vmax_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -709,9 +659,7 @@ define <vscale x 16 x i16> @vmax_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -735,9 +683,7 @@ define <vscale x 16 x i16> @vmax_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -759,9 +705,7 @@ define <vscale x 32 x i16> @vmax_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -785,9 +729,7 @@ define <vscale x 32 x i16> @vmax_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -809,9 +751,7 @@ define <vscale x 1 x i32> @vmax_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -835,9 +775,7 @@ define <vscale x 1 x i32> @vmax_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -859,9 +797,7 @@ define <vscale x 2 x i32> @vmax_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -885,9 +821,7 @@ define <vscale x 2 x i32> @vmax_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -909,9 +843,7 @@ define <vscale x 4 x i32> @vmax_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -935,9 +867,7 @@ define <vscale x 4 x i32> @vmax_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -959,9 +889,7 @@ define <vscale x 8 x i32> @vmax_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -985,9 +913,7 @@ define <vscale x 8 x i32> @vmax_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1009,9 +935,7 @@ define <vscale x 16 x i32> @vmax_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1035,9 +959,7 @@ define <vscale x 16 x i32> @vmax_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1094,9 +1016,7 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.smax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1194,9 +1114,7 @@ define <vscale x 1 x i64> @vmax_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1248,9 +1166,7 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1272,9 +1188,7 @@ define <vscale x 2 x i64> @vmax_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1326,9 +1240,7 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1350,9 +1262,7 @@ define <vscale x 4 x i64> @vmax_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1404,9 +1314,7 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1428,9 +1336,7 @@ define <vscale x 8 x i64> @vmax_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1482,8 +1388,6 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
index a6693fad8dd5f6..8eb70fbc91fa54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
@@ -33,10 +33,8 @@ define <vscale x 1 x i8> @vmax_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 1 x i8> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
+  %cmp = icmp ugt <vscale x 1 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -3)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -71,10 +69,8 @@ define <vscale x 2 x i8> @vmax_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 2 x i8> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
+  %cmp = icmp ugt <vscale x 2 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -3)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -109,10 +105,8 @@ define <vscale x 4 x i8> @vmax_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 4 x i8> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
+  %cmp = icmp ugt <vscale x 4 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -3)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -147,10 +141,8 @@ define <vscale x 8 x i8> @vmax_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 8 x i8> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
+  %cmp = icmp ugt <vscale x 8 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -3)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -185,10 +177,8 @@ define <vscale x 16 x i8> @vmax_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 16 x i8> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
+  %cmp = icmp ugt <vscale x 16 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -3)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -223,10 +213,8 @@ define <vscale x 32 x i8> @vmax_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 32 x i8> %va, %splat
-  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
+  %cmp = icmp ugt <vscale x 32 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -3)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -261,10 +249,8 @@ define <vscale x 64 x i8> @vmax_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 64 x i8> %va, %splat
-  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
+  %cmp = icmp ugt <vscale x 64 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -3)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -299,10 +285,8 @@ define <vscale x 1 x i16> @vmax_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 1 x i16> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
+  %cmp = icmp ugt <vscale x 1 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -3)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -337,10 +321,8 @@ define <vscale x 2 x i16> @vmax_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 2 x i16> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
+  %cmp = icmp ugt <vscale x 2 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -3)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -375,10 +357,8 @@ define <vscale x 4 x i16> @vmax_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 4 x i16> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
+  %cmp = icmp ugt <vscale x 4 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -3)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -413,10 +393,8 @@ define <vscale x 8 x i16> @vmax_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 8 x i16> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
+  %cmp = icmp ugt <vscale x 8 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -3)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -451,10 +429,8 @@ define <vscale x 16 x i16> @vmax_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 16 x i16> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
+  %cmp = icmp ugt <vscale x 16 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -3)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -489,10 +465,8 @@ define <vscale x 32 x i16> @vmax_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 32 x i16> %va, %splat
-  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
+  %cmp = icmp ugt <vscale x 32 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -3)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -527,10 +501,8 @@ define <vscale x 1 x i32> @vmax_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 1 x i32> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
+  %cmp = icmp ugt <vscale x 1 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -3)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -565,10 +537,8 @@ define <vscale x 2 x i32> @vmax_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 2 x i32> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
+  %cmp = icmp ugt <vscale x 2 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -3)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -603,10 +573,8 @@ define <vscale x 4 x i32> @vmax_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 4 x i32> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
+  %cmp = icmp ugt <vscale x 4 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -3)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -641,10 +609,8 @@ define <vscale x 8 x i32> @vmax_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 8 x i32> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
+  %cmp = icmp ugt <vscale x 8 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -3)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -679,10 +645,8 @@ define <vscale x 16 x i32> @vmax_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 16 x i32> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
+  %cmp = icmp ugt <vscale x 16 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -3)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -730,10 +694,8 @@ define <vscale x 1 x i64> @vmax_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 1 x i64> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
+  %cmp = icmp ugt <vscale x 1 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -3)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -781,10 +743,8 @@ define <vscale x 2 x i64> @vmax_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 2 x i64> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
+  %cmp = icmp ugt <vscale x 2 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -3)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -832,10 +792,8 @@ define <vscale x 4 x i64> @vmax_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 4 x i64> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
+  %cmp = icmp ugt <vscale x 4 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -3)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -883,10 +841,8 @@ define <vscale x 8 x i64> @vmax_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ugt <vscale x 8 x i64> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
+  %cmp = icmp ugt <vscale x 8 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -3)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -928,9 +884,7 @@ define <vscale x 8 x i32> @vmax_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vmerge.vim v12, v12, -3, v0
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 -3), <vscale x 8 x i32> zeroinitializer
   %cmp = icmp ugt <vscale x 8 x i32> %va, %vs
   %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vs
   ret <vscale x 8 x i32> %vc
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index a7fce573da9fe3..1f620a44dbbc89 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -41,9 +41,7 @@ define <vscale x 1 x i8> @vmaxu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -79,9 +77,7 @@ define <vscale x 1 x i8> @vmaxu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -103,9 +99,7 @@ define <vscale x 2 x i8> @vmaxu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -129,9 +123,7 @@ define <vscale x 2 x i8> @vmaxu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -153,9 +145,7 @@ define <vscale x 3 x i8> @vmaxu_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -179,9 +169,7 @@ define <vscale x 3 x i8> @vmaxu_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -203,9 +191,7 @@ define <vscale x 4 x i8> @vmaxu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -229,9 +215,7 @@ define <vscale x 4 x i8> @vmaxu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -253,9 +237,7 @@ define <vscale x 8 x i8> @vmaxu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -279,9 +261,7 @@ define <vscale x 8 x i8> @vmaxu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -303,9 +283,7 @@ define <vscale x 16 x i8> @vmaxu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -329,9 +307,7 @@ define <vscale x 16 x i8> @vmaxu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -353,9 +329,7 @@ define <vscale x 32 x i8> @vmaxu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -379,9 +353,7 @@ define <vscale x 32 x i8> @vmaxu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -403,9 +375,7 @@ define <vscale x 64 x i8> @vmaxu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -429,9 +399,7 @@ define <vscale x 64 x i8> @vmaxu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -487,9 +455,7 @@ define <vscale x 128 x i8> @vmaxu_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -511,9 +477,7 @@ define <vscale x 1 x i16> @vmaxu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -537,9 +501,7 @@ define <vscale x 1 x i16> @vmaxu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -561,9 +523,7 @@ define <vscale x 2 x i16> @vmaxu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -587,9 +547,7 @@ define <vscale x 2 x i16> @vmaxu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -611,9 +569,7 @@ define <vscale x 4 x i16> @vmaxu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -637,9 +593,7 @@ define <vscale x 4 x i16> @vmaxu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -661,9 +615,7 @@ define <vscale x 8 x i16> @vmaxu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -687,9 +639,7 @@ define <vscale x 8 x i16> @vmaxu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -711,9 +661,7 @@ define <vscale x 16 x i16> @vmaxu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -737,9 +685,7 @@ define <vscale x 16 x i16> @vmaxu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -761,9 +707,7 @@ define <vscale x 32 x i16> @vmaxu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -787,9 +731,7 @@ define <vscale x 32 x i16> @vmaxu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -811,9 +753,7 @@ define <vscale x 1 x i32> @vmaxu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -837,9 +777,7 @@ define <vscale x 1 x i32> @vmaxu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -861,9 +799,7 @@ define <vscale x 2 x i32> @vmaxu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -887,9 +823,7 @@ define <vscale x 2 x i32> @vmaxu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -911,9 +845,7 @@ define <vscale x 4 x i32> @vmaxu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -937,9 +869,7 @@ define <vscale x 4 x i32> @vmaxu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -961,9 +891,7 @@ define <vscale x 8 x i32> @vmaxu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -987,9 +915,7 @@ define <vscale x 8 x i32> @vmaxu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1011,9 +937,7 @@ define <vscale x 16 x i32> @vmaxu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1037,9 +961,7 @@ define <vscale x 16 x i32> @vmaxu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1096,9 +1018,7 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1196,9 +1116,7 @@ define <vscale x 1 x i64> @vmaxu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1250,9 +1168,7 @@ define <vscale x 1 x i64> @vmaxu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1274,9 +1190,7 @@ define <vscale x 2 x i64> @vmaxu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1328,9 +1242,7 @@ define <vscale x 2 x i64> @vmaxu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1352,9 +1264,7 @@ define <vscale x 4 x i64> @vmaxu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1406,9 +1316,7 @@ define <vscale x 4 x i64> @vmaxu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1430,9 +1338,7 @@ define <vscale x 8 x i64> @vmaxu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1484,8 +1390,6 @@ define <vscale x 8 x i64> @vmaxu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
index 7405282a07b701..7f526a21deac1c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
@@ -33,10 +33,8 @@ define <vscale x 1 x i8> @vmin_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 1 x i8> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
+  %cmp = icmp slt <vscale x 1 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -3)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -71,10 +69,8 @@ define <vscale x 2 x i8> @vmin_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 2 x i8> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
+  %cmp = icmp slt <vscale x 2 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -3)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -109,10 +105,8 @@ define <vscale x 4 x i8> @vmin_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 4 x i8> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
+  %cmp = icmp slt <vscale x 4 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -3)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -147,10 +141,8 @@ define <vscale x 8 x i8> @vmin_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 8 x i8> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
+  %cmp = icmp slt <vscale x 8 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -3)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -185,10 +177,8 @@ define <vscale x 16 x i8> @vmin_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 16 x i8> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
+  %cmp = icmp slt <vscale x 16 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -3)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -223,10 +213,8 @@ define <vscale x 32 x i8> @vmin_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 32 x i8> %va, %splat
-  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
+  %cmp = icmp slt <vscale x 32 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -3)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -261,10 +249,8 @@ define <vscale x 64 x i8> @vmin_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 64 x i8> %va, %splat
-  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
+  %cmp = icmp slt <vscale x 64 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -3)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -299,10 +285,8 @@ define <vscale x 1 x i16> @vmin_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 1 x i16> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
+  %cmp = icmp slt <vscale x 1 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -3)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -337,10 +321,8 @@ define <vscale x 2 x i16> @vmin_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 2 x i16> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
+  %cmp = icmp slt <vscale x 2 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -3)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -375,10 +357,8 @@ define <vscale x 4 x i16> @vmin_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 4 x i16> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
+  %cmp = icmp slt <vscale x 4 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -3)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -413,10 +393,8 @@ define <vscale x 8 x i16> @vmin_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 8 x i16> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
+  %cmp = icmp slt <vscale x 8 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -3)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -451,10 +429,8 @@ define <vscale x 16 x i16> @vmin_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 16 x i16> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
+  %cmp = icmp slt <vscale x 16 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -3)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -489,10 +465,8 @@ define <vscale x 32 x i16> @vmin_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 32 x i16> %va, %splat
-  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
+  %cmp = icmp slt <vscale x 32 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -3)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -527,10 +501,8 @@ define <vscale x 1 x i32> @vmin_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 1 x i32> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
+  %cmp = icmp slt <vscale x 1 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -3)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -565,10 +537,8 @@ define <vscale x 2 x i32> @vmin_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 2 x i32> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
+  %cmp = icmp slt <vscale x 2 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -3)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -603,10 +573,8 @@ define <vscale x 4 x i32> @vmin_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 4 x i32> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
+  %cmp = icmp slt <vscale x 4 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -3)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -641,10 +609,8 @@ define <vscale x 8 x i32> @vmin_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 8 x i32> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
+  %cmp = icmp slt <vscale x 8 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -3)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -679,10 +645,8 @@ define <vscale x 16 x i32> @vmin_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 16 x i32> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
+  %cmp = icmp slt <vscale x 16 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -3)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -730,10 +694,8 @@ define <vscale x 1 x i64> @vmin_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 1 x i64> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
+  %cmp = icmp slt <vscale x 1 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -3)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -781,10 +743,8 @@ define <vscale x 2 x i64> @vmin_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 2 x i64> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
+  %cmp = icmp slt <vscale x 2 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -3)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -832,10 +792,8 @@ define <vscale x 4 x i64> @vmin_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 4 x i64> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
+  %cmp = icmp slt <vscale x 4 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -3)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -883,9 +841,7 @@ define <vscale x 8 x i64> @vmin_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp slt <vscale x 8 x i64> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
+  %cmp = icmp slt <vscale x 8 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -3)
   ret <vscale x 8 x i64> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 95c5cda5e988e1..8fabf93356aebb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -39,9 +39,7 @@ define <vscale x 1 x i8> @vmin_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -77,9 +75,7 @@ define <vscale x 1 x i8> @vmin_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -101,9 +97,7 @@ define <vscale x 2 x i8> @vmin_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -127,9 +121,7 @@ define <vscale x 2 x i8> @vmin_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -151,9 +143,7 @@ define <vscale x 3 x i8> @vmin_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -177,9 +167,7 @@ define <vscale x 3 x i8> @vmin_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -201,9 +189,7 @@ define <vscale x 4 x i8> @vmin_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -227,9 +213,7 @@ define <vscale x 4 x i8> @vmin_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -251,9 +235,7 @@ define <vscale x 8 x i8> @vmin_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -277,9 +259,7 @@ define <vscale x 8 x i8> @vmin_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -301,9 +281,7 @@ define <vscale x 16 x i8> @vmin_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -327,9 +305,7 @@ define <vscale x 16 x i8> @vmin_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -351,9 +327,7 @@ define <vscale x 32 x i8> @vmin_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -377,9 +351,7 @@ define <vscale x 32 x i8> @vmin_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -401,9 +373,7 @@ define <vscale x 64 x i8> @vmin_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -427,9 +397,7 @@ define <vscale x 64 x i8> @vmin_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -485,9 +453,7 @@ define <vscale x 128 x i8> @vmin_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -509,9 +475,7 @@ define <vscale x 1 x i16> @vmin_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -535,9 +499,7 @@ define <vscale x 1 x i16> @vmin_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -559,9 +521,7 @@ define <vscale x 2 x i16> @vmin_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -585,9 +545,7 @@ define <vscale x 2 x i16> @vmin_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -609,9 +567,7 @@ define <vscale x 4 x i16> @vmin_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -635,9 +591,7 @@ define <vscale x 4 x i16> @vmin_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -659,9 +613,7 @@ define <vscale x 8 x i16> @vmin_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -685,9 +637,7 @@ define <vscale x 8 x i16> @vmin_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -709,9 +659,7 @@ define <vscale x 16 x i16> @vmin_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -735,9 +683,7 @@ define <vscale x 16 x i16> @vmin_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -759,9 +705,7 @@ define <vscale x 32 x i16> @vmin_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -785,9 +729,7 @@ define <vscale x 32 x i16> @vmin_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -809,9 +751,7 @@ define <vscale x 1 x i32> @vmin_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -835,9 +775,7 @@ define <vscale x 1 x i32> @vmin_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -859,9 +797,7 @@ define <vscale x 2 x i32> @vmin_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -885,9 +821,7 @@ define <vscale x 2 x i32> @vmin_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -909,9 +843,7 @@ define <vscale x 4 x i32> @vmin_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -935,9 +867,7 @@ define <vscale x 4 x i32> @vmin_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -959,9 +889,7 @@ define <vscale x 8 x i32> @vmin_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -985,9 +913,7 @@ define <vscale x 8 x i32> @vmin_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1009,9 +935,7 @@ define <vscale x 16 x i32> @vmin_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1035,9 +959,7 @@ define <vscale x 16 x i32> @vmin_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1094,9 +1016,7 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1194,9 +1114,7 @@ define <vscale x 1 x i64> @vmin_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1248,9 +1166,7 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1272,9 +1188,7 @@ define <vscale x 2 x i64> @vmin_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1326,9 +1240,7 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1350,9 +1262,7 @@ define <vscale x 4 x i64> @vmin_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1404,9 +1314,7 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1428,9 +1336,7 @@ define <vscale x 8 x i64> @vmin_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1482,8 +1388,6 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
index 2fbe87742a4c45..d22a7dcccf0adc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
@@ -33,10 +33,8 @@ define <vscale x 1 x i8> @vmin_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 1 x i8> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
+  %cmp = icmp ult <vscale x 1 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -3)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -71,10 +69,8 @@ define <vscale x 2 x i8> @vmin_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 2 x i8> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
+  %cmp = icmp ult <vscale x 2 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -3)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -109,10 +105,8 @@ define <vscale x 4 x i8> @vmin_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 4 x i8> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
+  %cmp = icmp ult <vscale x 4 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -3)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -147,10 +141,8 @@ define <vscale x 8 x i8> @vmin_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 8 x i8> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
+  %cmp = icmp ult <vscale x 8 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -3)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -185,10 +177,8 @@ define <vscale x 16 x i8> @vmin_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 16 x i8> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
+  %cmp = icmp ult <vscale x 16 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -3)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -223,10 +213,8 @@ define <vscale x 32 x i8> @vmin_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 32 x i8> %va, %splat
-  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
+  %cmp = icmp ult <vscale x 32 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -3)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -261,10 +249,8 @@ define <vscale x 64 x i8> @vmin_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -3, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 64 x i8> %va, %splat
-  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
+  %cmp = icmp ult <vscale x 64 x i8> %va, splat (i8 -3)
+  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -3)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -299,10 +285,8 @@ define <vscale x 1 x i16> @vmin_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 1 x i16> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
+  %cmp = icmp ult <vscale x 1 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -3)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -337,10 +321,8 @@ define <vscale x 2 x i16> @vmin_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 2 x i16> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
+  %cmp = icmp ult <vscale x 2 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -3)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -375,10 +357,8 @@ define <vscale x 4 x i16> @vmin_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 4 x i16> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
+  %cmp = icmp ult <vscale x 4 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -3)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -413,10 +393,8 @@ define <vscale x 8 x i16> @vmin_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 8 x i16> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
+  %cmp = icmp ult <vscale x 8 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -3)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -451,10 +429,8 @@ define <vscale x 16 x i16> @vmin_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 16 x i16> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
+  %cmp = icmp ult <vscale x 16 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -3)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -489,10 +465,8 @@ define <vscale x 32 x i16> @vmin_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -3, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 32 x i16> %va, %splat
-  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
+  %cmp = icmp ult <vscale x 32 x i16> %va, splat (i16 -3)
+  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -3)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -527,10 +501,8 @@ define <vscale x 1 x i32> @vmin_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 1 x i32> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
+  %cmp = icmp ult <vscale x 1 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -3)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -565,10 +537,8 @@ define <vscale x 2 x i32> @vmin_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 2 x i32> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
+  %cmp = icmp ult <vscale x 2 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -3)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -603,10 +573,8 @@ define <vscale x 4 x i32> @vmin_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 4 x i32> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
+  %cmp = icmp ult <vscale x 4 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -3)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -641,10 +609,8 @@ define <vscale x 8 x i32> @vmin_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 8 x i32> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
+  %cmp = icmp ult <vscale x 8 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -3)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -679,10 +645,8 @@ define <vscale x 16 x i32> @vmin_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 16 x i32> %va, %splat
-  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
+  %cmp = icmp ult <vscale x 16 x i32> %va, splat (i32 -3)
+  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -3)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -730,10 +694,8 @@ define <vscale x 1 x i64> @vmin_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 1 x i64> %va, %splat
-  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
+  %cmp = icmp ult <vscale x 1 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -3)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -781,10 +743,8 @@ define <vscale x 2 x i64> @vmin_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 2 x i64> %va, %splat
-  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
+  %cmp = icmp ult <vscale x 2 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -3)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -832,10 +792,8 @@ define <vscale x 4 x i64> @vmin_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 4 x i64> %va, %splat
-  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
+  %cmp = icmp ult <vscale x 4 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -3)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -883,10 +841,8 @@ define <vscale x 8 x i64> @vmin_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %cmp = icmp ult <vscale x 8 x i64> %va, %splat
-  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
+  %cmp = icmp ult <vscale x 8 x i64> %va, splat (i64 -3)
+  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -3)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -898,9 +854,7 @@ define <vscale x 8 x i32> @vmin_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %max = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> %max
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 -1)
   %cmp = icmp ult <vscale x 8 x i32> %va, %vs
   %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vs
   ret <vscale x 8 x i32> %vc
@@ -914,11 +868,9 @@ define <vscale x 8 x i32> @vmin_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 sign
 ; CHECK-NEXT:    vmerge.vxm v12, v12, a0, v0
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head0 = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %max = shufflevector <vscale x 8 x i32> %head0, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
   %head1 = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %splat = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %max
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> splat (i32 -1)
   %cmp = icmp ult <vscale x 8 x i32> %va, %vs
   %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vs
   ret <vscale x 8 x i32> %vc
@@ -932,11 +884,7 @@ define <vscale x 8 x i32> @vmin_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vmerge.vim v12, v12, -3, v0
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head0 = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %max = shufflevector <vscale x 8 x i32> %head0, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 -3, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %max
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 -3), <vscale x 8 x i32> splat (i32 -1)
   %cmp = icmp ult <vscale x 8 x i32> %va, %vs
   %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vs
   ret <vscale x 8 x i32> %vc
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index d3d5d6ece9b41c..8ec85e545a0f8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -41,9 +41,7 @@ define <vscale x 1 x i8> @vminu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.umin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.umin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -79,9 +77,7 @@ define <vscale x 1 x i8> @vminu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.umin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.umin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -103,9 +99,7 @@ define <vscale x 2 x i8> @vminu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.umin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.umin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -129,9 +123,7 @@ define <vscale x 2 x i8> @vminu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.umin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.umin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -153,9 +145,7 @@ define <vscale x 3 x i8> @vminu_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.umin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.umin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -179,9 +169,7 @@ define <vscale x 3 x i8> @vminu_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.umin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.umin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -203,9 +191,7 @@ define <vscale x 4 x i8> @vminu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.umin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.umin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -229,9 +215,7 @@ define <vscale x 4 x i8> @vminu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.umin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.umin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -253,9 +237,7 @@ define <vscale x 8 x i8> @vminu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.umin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.umin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -279,9 +261,7 @@ define <vscale x 8 x i8> @vminu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.umin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.umin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -303,9 +283,7 @@ define <vscale x 16 x i8> @vminu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.umin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.umin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -329,9 +307,7 @@ define <vscale x 16 x i8> @vminu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.umin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.umin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -353,9 +329,7 @@ define <vscale x 32 x i8> @vminu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.umin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.umin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -379,9 +353,7 @@ define <vscale x 32 x i8> @vminu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.umin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.umin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -403,9 +375,7 @@ define <vscale x 64 x i8> @vminu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.umin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.umin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -429,9 +399,7 @@ define <vscale x 64 x i8> @vminu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.umin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.umin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -487,9 +455,7 @@ define <vscale x 128 x i8> @vminu_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.umin.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.umin.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -511,9 +477,7 @@ define <vscale x 1 x i16> @vminu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.umin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.umin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -537,9 +501,7 @@ define <vscale x 1 x i16> @vminu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.umin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.umin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -561,9 +523,7 @@ define <vscale x 2 x i16> @vminu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.umin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.umin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -587,9 +547,7 @@ define <vscale x 2 x i16> @vminu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.umin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.umin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -611,9 +569,7 @@ define <vscale x 4 x i16> @vminu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.umin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.umin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -637,9 +593,7 @@ define <vscale x 4 x i16> @vminu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.umin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.umin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -661,9 +615,7 @@ define <vscale x 8 x i16> @vminu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.umin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.umin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -687,9 +639,7 @@ define <vscale x 8 x i16> @vminu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.umin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.umin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -711,9 +661,7 @@ define <vscale x 16 x i16> @vminu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.umin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.umin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -737,9 +685,7 @@ define <vscale x 16 x i16> @vminu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.umin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.umin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -761,9 +707,7 @@ define <vscale x 32 x i16> @vminu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.umin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.umin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -787,9 +731,7 @@ define <vscale x 32 x i16> @vminu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.umin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.umin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -811,9 +753,7 @@ define <vscale x 1 x i32> @vminu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.umin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.umin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -837,9 +777,7 @@ define <vscale x 1 x i32> @vminu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.umin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.umin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -861,9 +799,7 @@ define <vscale x 2 x i32> @vminu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.umin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.umin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -887,9 +823,7 @@ define <vscale x 2 x i32> @vminu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.umin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.umin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -911,9 +845,7 @@ define <vscale x 4 x i32> @vminu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.umin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.umin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -937,9 +869,7 @@ define <vscale x 4 x i32> @vminu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.umin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.umin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -961,9 +891,7 @@ define <vscale x 8 x i32> @vminu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.umin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.umin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -987,9 +915,7 @@ define <vscale x 8 x i32> @vminu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.umin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.umin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1011,9 +937,7 @@ define <vscale x 16 x i32> @vminu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.umin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.umin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1037,9 +961,7 @@ define <vscale x 16 x i32> @vminu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.umin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.umin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1096,9 +1018,7 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.umin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.umin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1196,9 +1116,7 @@ define <vscale x 1 x i64> @vminu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.umin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.umin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1250,9 +1168,7 @@ define <vscale x 1 x i64> @vminu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.umin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.umin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1274,9 +1190,7 @@ define <vscale x 2 x i64> @vminu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.umin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.umin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1328,9 +1242,7 @@ define <vscale x 2 x i64> @vminu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.umin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.umin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1352,9 +1264,7 @@ define <vscale x 4 x i64> @vminu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.umin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.umin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1406,9 +1316,7 @@ define <vscale x 4 x i64> @vminu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.umin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.umin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1430,9 +1338,7 @@ define <vscale x 8 x i64> @vminu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.umin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.umin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1484,8 +1390,6 @@ define <vscale x 8 x i64> @vminu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.umin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.umin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
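
For reference, a minimal sketch of the two equivalent splat forms being swapped throughout these hunks, using a hypothetical <vscale x 4 x i32> operand %va (the %head/%splat names and the constant 7 are illustrative only):

  ; old idiom: materialise the splat with insertelement + shufflevector
  %head = insertelement <vscale x 4 x i32> poison, i32 7, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i32> %va, %splat

  ; splat shorthand: the same constant splat written inline as a single operand
  %vc = mul <vscale x 4 x i32> %va, splat (i32 7)

The unmasked vp.* tests use the same shorthand for their all-true masks, splat (i1 true).
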
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
index 3e14058210e512..1a6d5a1d0029da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
@@ -34,9 +34,7 @@ define <vscale x 1 x i8> @vmul_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = mul <vscale x 1 x i8> %va, %splat
+  %vc = mul <vscale x 1 x i8> %va, splat (i8 -7)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -69,9 +67,7 @@ define <vscale x 2 x i8> @vmul_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = mul <vscale x 2 x i8> %va, %splat
+  %vc = mul <vscale x 2 x i8> %va, splat (i8 -7)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -104,9 +100,7 @@ define <vscale x 4 x i8> @vmul_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = mul <vscale x 4 x i8> %va, %splat
+  %vc = mul <vscale x 4 x i8> %va, splat (i8 -7)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -139,9 +133,7 @@ define <vscale x 8 x i8> @vmul_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = mul <vscale x 8 x i8> %va, %splat
+  %vc = mul <vscale x 8 x i8> %va, splat (i8 -7)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -174,9 +166,7 @@ define <vscale x 16 x i8> @vmul_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = mul <vscale x 16 x i8> %va, %splat
+  %vc = mul <vscale x 16 x i8> %va, splat (i8 -7)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -209,9 +199,7 @@ define <vscale x 32 x i8> @vmul_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = mul <vscale x 32 x i8> %va, %splat
+  %vc = mul <vscale x 32 x i8> %va, splat (i8 -7)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -244,9 +232,7 @@ define <vscale x 64 x i8> @vmul_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = mul <vscale x 64 x i8> %va, %splat
+  %vc = mul <vscale x 64 x i8> %va, splat (i8 -7)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -279,9 +265,7 @@ define <vscale x 1 x i16> @vmul_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = mul <vscale x 1 x i16> %va, %splat
+  %vc = mul <vscale x 1 x i16> %va, splat (i16 -7)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -314,9 +298,7 @@ define <vscale x 2 x i16> @vmul_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = mul <vscale x 2 x i16> %va, %splat
+  %vc = mul <vscale x 2 x i16> %va, splat (i16 -7)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -349,9 +331,7 @@ define <vscale x 4 x i16> @vmul_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = mul <vscale x 4 x i16> %va, %splat
+  %vc = mul <vscale x 4 x i16> %va, splat (i16 -7)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -384,9 +364,7 @@ define <vscale x 8 x i16> @vmul_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = mul <vscale x 8 x i16> %va, %splat
+  %vc = mul <vscale x 8 x i16> %va, splat (i16 -7)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -419,9 +397,7 @@ define <vscale x 16 x i16> @vmul_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = mul <vscale x 16 x i16> %va, %splat
+  %vc = mul <vscale x 16 x i16> %va, splat (i16 -7)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -454,9 +430,7 @@ define <vscale x 32 x i16> @vmul_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = mul <vscale x 32 x i16> %va, %splat
+  %vc = mul <vscale x 32 x i16> %va, splat (i16 -7)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -489,9 +463,7 @@ define <vscale x 1 x i32> @vmul_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = mul <vscale x 1 x i32> %va, %splat
+  %vc = mul <vscale x 1 x i32> %va, splat (i32 -7)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -524,9 +496,7 @@ define <vscale x 2 x i32> @vmul_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = mul <vscale x 2 x i32> %va, %splat
+  %vc = mul <vscale x 2 x i32> %va, splat (i32 -7)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -559,9 +529,7 @@ define <vscale x 4 x i32> @vmul_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = mul <vscale x 4 x i32> %va, %splat
+  %vc = mul <vscale x 4 x i32> %va, splat (i32 -7)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -594,9 +562,7 @@ define <vscale x 8 x i32> @vmul_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = mul <vscale x 8 x i32> %va, %splat
+  %vc = mul <vscale x 8 x i32> %va, splat (i32 -7)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -629,9 +595,7 @@ define <vscale x 16 x i32> @vmul_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = mul <vscale x 16 x i32> %va, %splat
+  %vc = mul <vscale x 16 x i32> %va, splat (i32 -7)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -677,9 +641,7 @@ define <vscale x 1 x i64> @vmul_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = mul <vscale x 1 x i64> %va, %splat
+  %vc = mul <vscale x 1 x i64> %va, splat (i64 -7)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -689,9 +651,7 @@ define <vscale x 1 x i64> @vmul_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = mul <vscale x 1 x i64> %va, %splat
+  %vc = mul <vscale x 1 x i64> %va, splat (i64 2)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -701,9 +661,7 @@ define <vscale x 1 x i64> @vmul_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = mul <vscale x 1 x i64> %va, %splat
+  %vc = mul <vscale x 1 x i64> %va, splat (i64 16)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -749,9 +707,7 @@ define <vscale x 2 x i64> @vmul_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = mul <vscale x 2 x i64> %va, %splat
+  %vc = mul <vscale x 2 x i64> %va, splat (i64 -7)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -761,9 +717,7 @@ define <vscale x 2 x i64> @vmul_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = mul <vscale x 2 x i64> %va, %splat
+  %vc = mul <vscale x 2 x i64> %va, splat (i64 2)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -773,9 +727,7 @@ define <vscale x 2 x i64> @vmul_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = mul <vscale x 2 x i64> %va, %splat
+  %vc = mul <vscale x 2 x i64> %va, splat (i64 16)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -821,9 +773,7 @@ define <vscale x 4 x i64> @vmul_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = mul <vscale x 4 x i64> %va, %splat
+  %vc = mul <vscale x 4 x i64> %va, splat (i64 -7)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -833,9 +783,7 @@ define <vscale x 4 x i64> @vmul_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = mul <vscale x 4 x i64> %va, %splat
+  %vc = mul <vscale x 4 x i64> %va, splat (i64 2)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -845,9 +793,7 @@ define <vscale x 4 x i64> @vmul_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = mul <vscale x 4 x i64> %va, %splat
+  %vc = mul <vscale x 4 x i64> %va, splat (i64 16)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -893,9 +839,7 @@ define <vscale x 8 x i64> @vmul_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = mul <vscale x 8 x i64> %va, %splat
+  %vc = mul <vscale x 8 x i64> %va, splat (i64 -7)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -905,9 +849,7 @@ define <vscale x 8 x i64> @vmul_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = mul <vscale x 8 x i64> %va, %splat
+  %vc = mul <vscale x 8 x i64> %va, splat (i64 2)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -917,9 +859,7 @@ define <vscale x 8 x i64> @vmul_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = mul <vscale x 8 x i64> %va, %splat
+  %vc = mul <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -967,9 +907,7 @@ define <vscale x 8 x i32> @vmul_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 1)
   %vc = mul <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -980,11 +918,9 @@ define <vscale x 8 x i32> @vmul_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 sign
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
   %head2 = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> splat (i32 1)
   %vc = mul <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
@@ -996,11 +932,7 @@ define <vscale x 8 x i32> @vmul_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %one = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head2 = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %one
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> splat (i32 1)
   %vc = mul <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
index 30ff90d8aa4817..24d1768da1027f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
@@ -36,9 +36,7 @@ define <vscale x 1 x i8> @vmul_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -62,9 +60,7 @@ define <vscale x 1 x i8> @vmul_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -86,9 +82,7 @@ define <vscale x 2 x i8> @vmul_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -112,9 +106,7 @@ define <vscale x 2 x i8> @vmul_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -136,9 +128,7 @@ define <vscale x 4 x i8> @vmul_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -162,9 +152,7 @@ define <vscale x 4 x i8> @vmul_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -186,9 +174,7 @@ define <vscale x 8 x i8> @vmul_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -212,9 +198,7 @@ define <vscale x 8 x i8> @vmul_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -236,9 +220,7 @@ define <vscale x 16 x i8> @vmul_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -262,9 +244,7 @@ define <vscale x 16 x i8> @vmul_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -286,9 +266,7 @@ define <vscale x 32 x i8> @vmul_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -312,9 +290,7 @@ define <vscale x 32 x i8> @vmul_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -336,9 +312,7 @@ define <vscale x 64 x i8> @vmul_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -362,9 +336,7 @@ define <vscale x 64 x i8> @vmul_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -386,9 +358,7 @@ define <vscale x 1 x i16> @vmul_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -412,9 +382,7 @@ define <vscale x 1 x i16> @vmul_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -436,9 +404,7 @@ define <vscale x 2 x i16> @vmul_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -462,9 +428,7 @@ define <vscale x 2 x i16> @vmul_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -486,9 +450,7 @@ define <vscale x 4 x i16> @vmul_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -512,9 +474,7 @@ define <vscale x 4 x i16> @vmul_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -536,9 +496,7 @@ define <vscale x 8 x i16> @vmul_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -562,9 +520,7 @@ define <vscale x 8 x i16> @vmul_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -586,9 +542,7 @@ define <vscale x 16 x i16> @vmul_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -612,9 +566,7 @@ define <vscale x 16 x i16> @vmul_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -636,9 +588,7 @@ define <vscale x 32 x i16> @vmul_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -662,9 +612,7 @@ define <vscale x 32 x i16> @vmul_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -686,9 +634,7 @@ define <vscale x 1 x i32> @vmul_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -712,9 +658,7 @@ define <vscale x 1 x i32> @vmul_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -736,9 +680,7 @@ define <vscale x 2 x i32> @vmul_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -762,9 +704,7 @@ define <vscale x 2 x i32> @vmul_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -786,9 +726,7 @@ define <vscale x 4 x i32> @vmul_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -812,9 +750,7 @@ define <vscale x 4 x i32> @vmul_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -836,9 +772,7 @@ define <vscale x 7 x i32> @vmul_vv_nxv7i32_unmasked(<vscale x 7 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i32> %v
 }
 
@@ -862,9 +796,7 @@ define <vscale x 7 x i32> @vmul_vx_nxv7i32_unmasked(<vscale x 7 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 7 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> poison, <vscale x 7 x i32> zeroinitializer
-  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
-  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> %m, i32 %evl)
+  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i32> %v
 }
 
@@ -886,9 +818,7 @@ define <vscale x 8 x i32> @vmul_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -912,9 +842,7 @@ define <vscale x 8 x i32> @vmul_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -936,9 +864,7 @@ define <vscale x 16 x i32> @vmul_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -974,9 +900,7 @@ define <vscale x 16 x i32> @vmul_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -998,9 +922,7 @@ define <vscale x 1 x i64> @vmul_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1052,9 +974,7 @@ define <vscale x 1 x i64> @vmul_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1076,9 +996,7 @@ define <vscale x 2 x i64> @vmul_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1130,9 +1048,7 @@ define <vscale x 2 x i64> @vmul_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1154,9 +1070,7 @@ define <vscale x 4 x i64> @vmul_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1208,9 +1122,7 @@ define <vscale x 4 x i64> @vmul_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1232,9 +1144,7 @@ define <vscale x 8 x i64> @vmul_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1286,8 +1196,6 @@ define <vscale x 8 x i64> @vmul_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
index 0fda7909df3134..253cfb040308b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
@@ -17,9 +17,7 @@ define <vscale x 4 x i1> @srem_eq_fold_nxv4i8(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
-  %head_six = insertelement <vscale x 4 x i8> poison, i8 6, i32 0
-  %splat_six = shufflevector <vscale x 4 x i8> %head_six, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %rem = srem <vscale x 4 x i8> %va, %splat_six
+  %rem = srem <vscale x 4 x i8> %va, splat (i8 6)
 
   %cc = icmp eq <vscale x 4 x i8> %rem, zeroinitializer
   ret <vscale x 4 x i1> %cc
@@ -34,9 +32,7 @@ define <vscale x 1 x i32> @vmulh_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
   %vc = sext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
   %vd = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %ve = mul <vscale x 1 x i64> %vc, %vd
-  %head = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vf = lshr <vscale x 1 x i64> %ve, %splat
+  %vf = lshr <vscale x 1 x i64> %ve, splat (i64 32)
   %vg = trunc <vscale x 1 x i64> %vf to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %vg
 }
@@ -52,9 +48,7 @@ define <vscale x 1 x i32> @vmulh_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
   %vb = sext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
   %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %vd = mul <vscale x 1 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 1 x i64> %head2, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %ve = lshr <vscale x 1 x i64> %vd, %splat2
+  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %vf
 }
@@ -66,14 +60,10 @@ define <vscale x 1 x i32> @vmulh_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
-  %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vb = sext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
+  %vb = sext <vscale x 1 x i32> splat (i32 -7) to <vscale x 1 x i64>
   %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %vd = mul <vscale x 1 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 1 x i64> %head2, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %ve = lshr <vscale x 1 x i64> %vd, %splat2
+  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %vf
 }
@@ -85,14 +75,10 @@ define <vscale x 1 x i32> @vmulh_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 1 x i32> poison, i32 16, i32 0
-  %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vb = sext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
+  %vb = sext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
   %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %vd = mul <vscale x 1 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 1 x i64> %head2, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %ve = lshr <vscale x 1 x i64> %vd, %splat2
+  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %vf
 }
@@ -106,9 +92,7 @@ define <vscale x 2 x i32> @vmulh_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
   %vc = sext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
   %vd = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %ve = mul <vscale x 2 x i64> %vc, %vd
-  %head = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vf = lshr <vscale x 2 x i64> %ve, %splat
+  %vf = lshr <vscale x 2 x i64> %ve, splat (i64 32)
   %vg = trunc <vscale x 2 x i64> %vf to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %vg
 }
@@ -124,9 +108,7 @@ define <vscale x 2 x i32> @vmulh_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
   %vb = sext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
   %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %vd = mul <vscale x 2 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %head2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ve = lshr <vscale x 2 x i64> %vd, %splat2
+  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %vf
 }
@@ -138,14 +120,10 @@ define <vscale x 2 x i32> @vmulh_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
-  %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vb = sext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
+  %vb = sext <vscale x 2 x i32> splat (i32 -7) to <vscale x 2 x i64>
   %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %vd = mul <vscale x 2 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %head2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ve = lshr <vscale x 2 x i64> %vd, %splat2
+  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %vf
 }
@@ -157,14 +135,10 @@ define <vscale x 2 x i32> @vmulh_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 2 x i32> poison, i32 16, i32 0
-  %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vb = sext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
+  %vb = sext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
   %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %vd = mul <vscale x 2 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %head2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ve = lshr <vscale x 2 x i64> %vd, %splat2
+  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %vf
 }
@@ -178,9 +152,7 @@ define <vscale x 4 x i32> @vmulh_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
   %vc = sext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
   %vd = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %ve = mul <vscale x 4 x i64> %vc, %vd
-  %head = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vf = lshr <vscale x 4 x i64> %ve, %splat
+  %vf = lshr <vscale x 4 x i64> %ve, splat (i64 32)
   %vg = trunc <vscale x 4 x i64> %vf to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vg
 }
@@ -196,9 +168,7 @@ define <vscale x 4 x i32> @vmulh_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
   %vb = sext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
   %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %vd = mul <vscale x 4 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 4 x i64> %head2, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %ve = lshr <vscale x 4 x i64> %vd, %splat2
+  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vf
 }
@@ -210,14 +180,10 @@ define <vscale x 4 x i32> @vmulh_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = sext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
+  %vb = sext <vscale x 4 x i32> splat (i32 -7) to <vscale x 4 x i64>
   %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %vd = mul <vscale x 4 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 4 x i64> %head2, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %ve = lshr <vscale x 4 x i64> %vd, %splat2
+  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vf
 }
@@ -229,14 +195,10 @@ define <vscale x 4 x i32> @vmulh_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 4 x i32> poison, i32 16, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = sext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
+  %vb = sext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
   %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %vd = mul <vscale x 4 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 4 x i64> %head2, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %ve = lshr <vscale x 4 x i64> %vd, %splat2
+  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vf
 }
@@ -250,9 +212,7 @@ define <vscale x 8 x i32> @vmulh_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
   %vc = sext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
   %vd = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %ve = mul <vscale x 8 x i64> %vc, %vd
-  %head = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vf = lshr <vscale x 8 x i64> %ve, %splat
+  %vf = lshr <vscale x 8 x i64> %ve, splat (i64 32)
   %vg = trunc <vscale x 8 x i64> %vf to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vg
 }
@@ -268,9 +228,7 @@ define <vscale x 8 x i32> @vmulh_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
   %vb = sext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
   %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %vd = mul <vscale x 8 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %ve = lshr <vscale x 8 x i64> %vd, %splat2
+  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vf
 }
@@ -282,14 +240,10 @@ define <vscale x 8 x i32> @vmulh_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
-  %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vb = sext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
+  %vb = sext <vscale x 8 x i32> splat (i32 -7) to <vscale x 8 x i64>
   %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %vd = mul <vscale x 8 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %ve = lshr <vscale x 8 x i64> %vd, %splat2
+  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vf
 }
@@ -301,14 +255,10 @@ define <vscale x 8 x i32> @vmulh_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vb = sext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
+  %vb = sext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
   %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %vd = mul <vscale x 8 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %ve = lshr <vscale x 8 x i64> %vd, %splat2
+  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vf
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
index 5354c17fd2a7d7..3fd7f5be860cf4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
@@ -11,9 +11,7 @@ define <vscale x 1 x i32> @vmulhu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
   %vc = zext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
   %vd = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %ve = mul <vscale x 1 x i64> %vc, %vd
-  %head = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vf = lshr <vscale x 1 x i64> %ve, %splat
+  %vf = lshr <vscale x 1 x i64> %ve, splat (i64 32)
   %vg = trunc <vscale x 1 x i64> %vf to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %vg
 }
@@ -29,9 +27,7 @@ define <vscale x 1 x i32> @vmulhu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
   %vb = zext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
   %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %vd = mul <vscale x 1 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 1 x i64> %head2, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %ve = lshr <vscale x 1 x i64> %vd, %splat2
+  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %vf
 }
@@ -43,14 +39,10 @@ define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
-  %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vb = zext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
+  %vb = zext <vscale x 1 x i32> splat (i32 -7) to <vscale x 1 x i64>
   %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %vd = mul <vscale x 1 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 1 x i64> %head2, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %ve = lshr <vscale x 1 x i64> %vd, %splat2
+  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %vf
 }
@@ -68,14 +60,10 @@ define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head1 = insertelement <vscale x 1 x i32> poison, i32 16, i32 0
-  %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vb = zext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
+  %vb = zext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
   %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
   %vd = mul <vscale x 1 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 1 x i64> %head2, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %ve = lshr <vscale x 1 x i64> %vd, %splat2
+  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %vf
 }
@@ -89,9 +77,7 @@ define <vscale x 2 x i32> @vmulhu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
   %vc = zext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
   %vd = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %ve = mul <vscale x 2 x i64> %vc, %vd
-  %head = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vf = lshr <vscale x 2 x i64> %ve, %splat
+  %vf = lshr <vscale x 2 x i64> %ve, splat (i64 32)
   %vg = trunc <vscale x 2 x i64> %vf to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %vg
 }
@@ -107,9 +93,7 @@ define <vscale x 2 x i32> @vmulhu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
   %vb = zext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
   %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %vd = mul <vscale x 2 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %head2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ve = lshr <vscale x 2 x i64> %vd, %splat2
+  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %vf
 }
@@ -121,14 +105,10 @@ define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
-  %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vb = zext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
+  %vb = zext <vscale x 2 x i32> splat (i32 -7) to <vscale x 2 x i64>
   %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %vd = mul <vscale x 2 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %head2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ve = lshr <vscale x 2 x i64> %vd, %splat2
+  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %vf
 }
@@ -146,14 +126,10 @@ define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head1 = insertelement <vscale x 2 x i32> poison, i32 16, i32 0
-  %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vb = zext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
+  %vb = zext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
   %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
   %vd = mul <vscale x 2 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 2 x i64> %head2, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %ve = lshr <vscale x 2 x i64> %vd, %splat2
+  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %vf
 }
@@ -167,9 +143,7 @@ define <vscale x 4 x i32> @vmulhu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
   %vc = zext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
   %vd = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %ve = mul <vscale x 4 x i64> %vc, %vd
-  %head = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vf = lshr <vscale x 4 x i64> %ve, %splat
+  %vf = lshr <vscale x 4 x i64> %ve, splat (i64 32)
   %vg = trunc <vscale x 4 x i64> %vf to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vg
 }
@@ -185,9 +159,7 @@ define <vscale x 4 x i32> @vmulhu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
   %vb = zext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
   %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %vd = mul <vscale x 4 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 4 x i64> %head2, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %ve = lshr <vscale x 4 x i64> %vd, %splat2
+  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vf
 }
@@ -199,14 +171,10 @@ define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = zext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
+  %vb = zext <vscale x 4 x i32> splat (i32 -7) to <vscale x 4 x i64>
   %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %vd = mul <vscale x 4 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 4 x i64> %head2, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %ve = lshr <vscale x 4 x i64> %vd, %splat2
+  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vf
 }
@@ -224,14 +192,10 @@ define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head1 = insertelement <vscale x 4 x i32> poison, i32 16, i32 0
-  %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = zext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
+  %vb = zext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
   %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
   %vd = mul <vscale x 4 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 4 x i64> %head2, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %ve = lshr <vscale x 4 x i64> %vd, %splat2
+  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vf
 }
@@ -245,9 +209,7 @@ define <vscale x 8 x i32> @vmulhu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
   %vc = zext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
   %vd = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %ve = mul <vscale x 8 x i64> %vc, %vd
-  %head = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vf = lshr <vscale x 8 x i64> %ve, %splat
+  %vf = lshr <vscale x 8 x i64> %ve, splat (i64 32)
   %vg = trunc <vscale x 8 x i64> %vf to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vg
 }
@@ -263,9 +225,7 @@ define <vscale x 8 x i32> @vmulhu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
   %vb = zext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
   %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %vd = mul <vscale x 8 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %ve = lshr <vscale x 8 x i64> %vd, %splat2
+  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vf
 }
@@ -277,14 +237,10 @@ define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
-  %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vb = zext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
+  %vb = zext <vscale x 8 x i32> splat (i32 -7) to <vscale x 8 x i64>
   %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %vd = mul <vscale x 8 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %ve = lshr <vscale x 8 x i64> %vd, %splat2
+  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vf
 }
@@ -302,14 +258,10 @@ define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
-  %head1 = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vb = zext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
+  %vb = zext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
   %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
   %vd = mul <vscale x 8 x i64> %vb, %vc
-  %head2 = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %ve = lshr <vscale x 8 x i64> %vd, %splat2
+  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
   %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %vf
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
index 564f278372cc02..f958fe815caaab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
@@ -16,10 +16,8 @@ define <vscale x 1 x i8> @vnmsac_vv_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -31,11 +29,9 @@ define <vscale x 1 x i8> @vnmsac_vv_nxv1i8_unmasked(<vscale x 1 x i8> %a, <vscal
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
 
@@ -48,10 +44,8 @@ define <vscale x 1 x i8> @vnmsac_vx_nxv1i8(<vscale x 1 x i8> %a, i8 %b, <vscale
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -65,11 +59,9 @@ define <vscale x 1 x i8> @vnmsac_vx_nxv1i8_unmasked(<vscale x 1 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
 
@@ -80,10 +72,8 @@ define <vscale x 1 x i8> @vnmsac_vv_nxv1i8_ta(<vscale x 1 x i8> %a, <vscale x 1
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -97,10 +87,8 @@ define <vscale x 1 x i8> @vnmsac_vx_nxv1i8_ta(<vscale x 1 x i8> %a, i8 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
   ret <vscale x 1 x i8> %u
 }
@@ -117,10 +105,8 @@ define <vscale x 2 x i8> @vnmsac_vv_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -132,11 +118,9 @@ define <vscale x 2 x i8> @vnmsac_vv_nxv2i8_unmasked(<vscale x 2 x i8> %a, <vscal
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
 
@@ -149,10 +133,8 @@ define <vscale x 2 x i8> @vnmsac_vx_nxv2i8(<vscale x 2 x i8> %a, i8 %b, <vscale
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -166,11 +148,9 @@ define <vscale x 2 x i8> @vnmsac_vx_nxv2i8_unmasked(<vscale x 2 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
 
@@ -181,10 +161,8 @@ define <vscale x 2 x i8> @vnmsac_vv_nxv2i8_ta(<vscale x 2 x i8> %a, <vscale x 2
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -198,10 +176,8 @@ define <vscale x 2 x i8> @vnmsac_vx_nxv2i8_ta(<vscale x 2 x i8> %a, i8 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
   ret <vscale x 2 x i8> %u
 }
@@ -218,10 +194,8 @@ define <vscale x 4 x i8> @vnmsac_vv_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -233,11 +207,9 @@ define <vscale x 4 x i8> @vnmsac_vv_nxv4i8_unmasked(<vscale x 4 x i8> %a, <vscal
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
 
@@ -250,10 +222,8 @@ define <vscale x 4 x i8> @vnmsac_vx_nxv4i8(<vscale x 4 x i8> %a, i8 %b, <vscale
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -267,11 +237,9 @@ define <vscale x 4 x i8> @vnmsac_vx_nxv4i8_unmasked(<vscale x 4 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
 
@@ -282,10 +250,8 @@ define <vscale x 4 x i8> @vnmsac_vv_nxv4i8_ta(<vscale x 4 x i8> %a, <vscale x 4
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -299,10 +265,8 @@ define <vscale x 4 x i8> @vnmsac_vx_nxv4i8_ta(<vscale x 4 x i8> %a, i8 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
   ret <vscale x 4 x i8> %u
 }
@@ -319,10 +283,8 @@ define <vscale x 8 x i8> @vnmsac_vv_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -334,11 +296,9 @@ define <vscale x 8 x i8> @vnmsac_vv_nxv8i8_unmasked(<vscale x 8 x i8> %a, <vscal
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
 
@@ -351,10 +311,8 @@ define <vscale x 8 x i8> @vnmsac_vx_nxv8i8(<vscale x 8 x i8> %a, i8 %b, <vscale
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -368,11 +326,9 @@ define <vscale x 8 x i8> @vnmsac_vx_nxv8i8_unmasked(<vscale x 8 x i8> %a, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
 
@@ -383,10 +339,8 @@ define <vscale x 8 x i8> @vnmsac_vv_nxv8i8_ta(<vscale x 8 x i8> %a, <vscale x 8
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -400,10 +354,8 @@ define <vscale x 8 x i8> @vnmsac_vx_nxv8i8_ta(<vscale x 8 x i8> %a, i8 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
   ret <vscale x 8 x i8> %u
 }
@@ -420,10 +372,8 @@ define <vscale x 16 x i8> @vnmsac_vv_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -435,11 +385,9 @@ define <vscale x 16 x i8> @vnmsac_vv_nxv16i8_unmasked(<vscale x 16 x i8> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %allones, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
 
@@ -452,10 +400,8 @@ define <vscale x 16 x i8> @vnmsac_vx_nxv16i8(<vscale x 16 x i8> %a, i8 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -469,11 +415,9 @@ define <vscale x 16 x i8> @vnmsac_vx_nxv16i8_unmasked(<vscale x 16 x i8> %a, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %allones, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
 
@@ -484,10 +428,8 @@ define <vscale x 16 x i8> @vnmsac_vv_nxv16i8_ta(<vscale x 16 x i8> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -501,10 +443,8 @@ define <vscale x 16 x i8> @vnmsac_vx_nxv16i8_ta(<vscale x 16 x i8> %a, i8 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
   ret <vscale x 16 x i8> %u
 }
@@ -521,10 +461,8 @@ define <vscale x 32 x i8> @vnmsac_vv_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -536,11 +474,9 @@ define <vscale x 32 x i8> @vnmsac_vv_nxv32i8_unmasked(<vscale x 32 x i8> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %allones, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
 
@@ -553,10 +489,8 @@ define <vscale x 32 x i8> @vnmsac_vx_nxv32i8(<vscale x 32 x i8> %a, i8 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -570,11 +504,9 @@ define <vscale x 32 x i8> @vnmsac_vx_nxv32i8_unmasked(<vscale x 32 x i8> %a, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %allones, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
 
@@ -585,10 +517,8 @@ define <vscale x 32 x i8> @vnmsac_vv_nxv32i8_ta(<vscale x 32 x i8> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -602,10 +532,8 @@ define <vscale x 32 x i8> @vnmsac_vx_nxv32i8_ta(<vscale x 32 x i8> %a, i8 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
   ret <vscale x 32 x i8> %u
 }
@@ -623,10 +551,8 @@ define <vscale x 64 x i8> @vnmsac_vv_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -639,11 +565,9 @@ define <vscale x 64 x i8> @vnmsac_vv_nxv64i8_unmasked(<vscale x 64 x i8> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
-  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %allones, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> splat (i1 -1), <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
 
@@ -656,10 +580,8 @@ define <vscale x 64 x i8> @vnmsac_vx_nxv64i8(<vscale x 64 x i8> %a, i8 %b, <vsca
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -673,11 +595,9 @@ define <vscale x 64 x i8> @vnmsac_vx_nxv64i8_unmasked(<vscale x 64 x i8> %a, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
-  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %allones, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> splat (i1 -1), <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
 
@@ -689,10 +609,8 @@ define <vscale x 64 x i8> @vnmsac_vv_nxv64i8_ta(<vscale x 64 x i8> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -706,10 +624,8 @@ define <vscale x 64 x i8> @vnmsac_vx_nxv64i8_ta(<vscale x 64 x i8> %a, i8 %b, <v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
-  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
   ret <vscale x 64 x i8> %u
 }
@@ -726,10 +642,8 @@ define <vscale x 1 x i16> @vnmsac_vv_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -741,11 +655,9 @@ define <vscale x 1 x i16> @vnmsac_vv_nxv1i16_unmasked(<vscale x 1 x i16> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %allones, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
 
@@ -758,10 +670,8 @@ define <vscale x 1 x i16> @vnmsac_vx_nxv1i16(<vscale x 1 x i16> %a, i16 %b, <vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -775,11 +685,9 @@ define <vscale x 1 x i16> @vnmsac_vx_nxv1i16_unmasked(<vscale x 1 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %allones, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
 
@@ -790,10 +698,8 @@ define <vscale x 1 x i16> @vnmsac_vv_nxv1i16_ta(<vscale x 1 x i16> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -807,10 +713,8 @@ define <vscale x 1 x i16> @vnmsac_vx_nxv1i16_ta(<vscale x 1 x i16> %a, i16 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
   ret <vscale x 1 x i16> %u
 }
@@ -827,10 +731,8 @@ define <vscale x 2 x i16> @vnmsac_vv_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -842,11 +744,9 @@ define <vscale x 2 x i16> @vnmsac_vv_nxv2i16_unmasked(<vscale x 2 x i16> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %allones, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
 
@@ -859,10 +759,8 @@ define <vscale x 2 x i16> @vnmsac_vx_nxv2i16(<vscale x 2 x i16> %a, i16 %b, <vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -876,11 +774,9 @@ define <vscale x 2 x i16> @vnmsac_vx_nxv2i16_unmasked(<vscale x 2 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %allones, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
 
@@ -891,10 +787,8 @@ define <vscale x 2 x i16> @vnmsac_vv_nxv2i16_ta(<vscale x 2 x i16> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -908,10 +802,8 @@ define <vscale x 2 x i16> @vnmsac_vx_nxv2i16_ta(<vscale x 2 x i16> %a, i16 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
   ret <vscale x 2 x i16> %u
 }
@@ -928,10 +820,8 @@ define <vscale x 4 x i16> @vnmsac_vv_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -943,11 +833,9 @@ define <vscale x 4 x i16> @vnmsac_vv_nxv4i16_unmasked(<vscale x 4 x i16> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %allones, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
 
@@ -960,10 +848,8 @@ define <vscale x 4 x i16> @vnmsac_vx_nxv4i16(<vscale x 4 x i16> %a, i16 %b, <vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -977,11 +863,9 @@ define <vscale x 4 x i16> @vnmsac_vx_nxv4i16_unmasked(<vscale x 4 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %allones, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
 
@@ -992,10 +876,8 @@ define <vscale x 4 x i16> @vnmsac_vv_nxv4i16_ta(<vscale x 4 x i16> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -1009,10 +891,8 @@ define <vscale x 4 x i16> @vnmsac_vx_nxv4i16_ta(<vscale x 4 x i16> %a, i16 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
   ret <vscale x 4 x i16> %u
 }
@@ -1029,10 +909,8 @@ define <vscale x 8 x i16> @vnmsac_vv_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1044,11 +922,9 @@ define <vscale x 8 x i16> @vnmsac_vv_nxv8i16_unmasked(<vscale x 8 x i16> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
 
@@ -1061,10 +937,8 @@ define <vscale x 8 x i16> @vnmsac_vx_nxv8i16(<vscale x 8 x i16> %a, i16 %b, <vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1078,11 +952,9 @@ define <vscale x 8 x i16> @vnmsac_vx_nxv8i16_unmasked(<vscale x 8 x i16> %a, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
 
@@ -1093,10 +965,8 @@ define <vscale x 8 x i16> @vnmsac_vv_nxv8i16_ta(<vscale x 8 x i16> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1110,10 +980,8 @@ define <vscale x 8 x i16> @vnmsac_vx_nxv8i16_ta(<vscale x 8 x i16> %a, i16 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
   ret <vscale x 8 x i16> %u
 }
@@ -1130,10 +998,8 @@ define <vscale x 16 x i16> @vnmsac_vv_nxv16i16(<vscale x 16 x i16> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1145,11 +1011,9 @@ define <vscale x 16 x i16> @vnmsac_vv_nxv16i16_unmasked(<vscale x 16 x i16> %a,
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %allones, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
 
@@ -1162,10 +1026,8 @@ define <vscale x 16 x i16> @vnmsac_vx_nxv16i16(<vscale x 16 x i16> %a, i16 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1179,11 +1041,9 @@ define <vscale x 16 x i16> @vnmsac_vx_nxv16i16_unmasked(<vscale x 16 x i16> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %allones, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
 
@@ -1194,10 +1054,8 @@ define <vscale x 16 x i16> @vnmsac_vv_nxv16i16_ta(<vscale x 16 x i16> %a, <vscal
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1211,10 +1069,8 @@ define <vscale x 16 x i16> @vnmsac_vx_nxv16i16_ta(<vscale x 16 x i16> %a, i16 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
   ret <vscale x 16 x i16> %u
 }
@@ -1232,10 +1088,8 @@ define <vscale x 32 x i16> @vnmsac_vv_nxv32i16(<vscale x 32 x i16> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1248,11 +1102,9 @@ define <vscale x 32 x i16> @vnmsac_vv_nxv32i16_unmasked(<vscale x 32 x i16> %a,
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %allones, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
 
@@ -1265,10 +1117,8 @@ define <vscale x 32 x i16> @vnmsac_vx_nxv32i16(<vscale x 32 x i16> %a, i16 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1282,11 +1132,9 @@ define <vscale x 32 x i16> @vnmsac_vx_nxv32i16_unmasked(<vscale x 32 x i16> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
-  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %allones, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
 
@@ -1298,10 +1146,8 @@ define <vscale x 32 x i16> @vnmsac_vv_nxv32i16_ta(<vscale x 32 x i16> %a, <vscal
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1315,10 +1161,8 @@ define <vscale x 32 x i16> @vnmsac_vx_nxv32i16_ta(<vscale x 32 x i16> %a, i16 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
-  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
   ret <vscale x 32 x i16> %u
 }
@@ -1335,10 +1179,8 @@ define <vscale x 1 x i32> @vnmsac_vv_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1350,11 +1192,9 @@ define <vscale x 1 x i32> @vnmsac_vv_nxv1i32_unmasked(<vscale x 1 x i32> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
 
@@ -1367,10 +1207,8 @@ define <vscale x 1 x i32> @vnmsac_vx_nxv1i32(<vscale x 1 x i32> %a, i32 %b, <vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1384,11 +1222,9 @@ define <vscale x 1 x i32> @vnmsac_vx_nxv1i32_unmasked(<vscale x 1 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
 
@@ -1399,10 +1235,8 @@ define <vscale x 1 x i32> @vnmsac_vv_nxv1i32_ta(<vscale x 1 x i32> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1416,10 +1250,8 @@ define <vscale x 1 x i32> @vnmsac_vx_nxv1i32_ta(<vscale x 1 x i32> %a, i32 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %u
 }
@@ -1436,10 +1268,8 @@ define <vscale x 2 x i32> @vnmsac_vv_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1451,11 +1281,9 @@ define <vscale x 2 x i32> @vnmsac_vv_nxv2i32_unmasked(<vscale x 2 x i32> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %allones, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
 
@@ -1468,10 +1296,8 @@ define <vscale x 2 x i32> @vnmsac_vx_nxv2i32(<vscale x 2 x i32> %a, i32 %b, <vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1485,11 +1311,9 @@ define <vscale x 2 x i32> @vnmsac_vx_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %allones, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
 
@@ -1500,10 +1324,8 @@ define <vscale x 2 x i32> @vnmsac_vv_nxv2i32_ta(<vscale x 2 x i32> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1517,10 +1339,8 @@ define <vscale x 2 x i32> @vnmsac_vx_nxv2i32_ta(<vscale x 2 x i32> %a, i32 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
   ret <vscale x 2 x i32> %u
 }
@@ -1537,10 +1357,8 @@ define <vscale x 4 x i32> @vnmsac_vv_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1552,11 +1370,9 @@ define <vscale x 4 x i32> @vnmsac_vv_nxv4i32_unmasked(<vscale x 4 x i32> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %allones, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
 
@@ -1569,10 +1385,8 @@ define <vscale x 4 x i32> @vnmsac_vx_nxv4i32(<vscale x 4 x i32> %a, i32 %b, <vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1586,11 +1400,9 @@ define <vscale x 4 x i32> @vnmsac_vx_nxv4i32_unmasked(<vscale x 4 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %allones, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
 
@@ -1601,10 +1413,8 @@ define <vscale x 4 x i32> @vnmsac_vv_nxv4i32_ta(<vscale x 4 x i32> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1618,10 +1428,8 @@ define <vscale x 4 x i32> @vnmsac_vx_nxv4i32_ta(<vscale x 4 x i32> %a, i32 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
   ret <vscale x 4 x i32> %u
 }
@@ -1638,10 +1446,8 @@ define <vscale x 8 x i32> @vnmsac_vv_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1653,11 +1459,9 @@ define <vscale x 8 x i32> @vnmsac_vv_nxv8i32_unmasked(<vscale x 8 x i32> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
 
@@ -1670,10 +1474,8 @@ define <vscale x 8 x i32> @vnmsac_vx_nxv8i32(<vscale x 8 x i32> %a, i32 %b, <vsc
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1687,11 +1489,9 @@ define <vscale x 8 x i32> @vnmsac_vx_nxv8i32_unmasked(<vscale x 8 x i32> %a, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
 
@@ -1702,10 +1502,8 @@ define <vscale x 8 x i32> @vnmsac_vv_nxv8i32_ta(<vscale x 8 x i32> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1719,10 +1517,8 @@ define <vscale x 8 x i32> @vnmsac_vx_nxv8i32_ta(<vscale x 8 x i32> %a, i32 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
   ret <vscale x 8 x i32> %u
 }
@@ -1740,10 +1536,8 @@ define <vscale x 16 x i32> @vnmsac_vv_nxv16i32(<vscale x 16 x i32> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1756,11 +1550,9 @@ define <vscale x 16 x i32> @vnmsac_vv_nxv16i32_unmasked(<vscale x 16 x i32> %a,
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %allones, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
 
@@ -1773,10 +1565,8 @@ define <vscale x 16 x i32> @vnmsac_vx_nxv16i32(<vscale x 16 x i32> %a, i32 %b, <
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1790,11 +1580,9 @@ define <vscale x 16 x i32> @vnmsac_vx_nxv16i32_unmasked(<vscale x 16 x i32> %a,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
-  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %allones, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
 
@@ -1806,10 +1594,8 @@ define <vscale x 16 x i32> @vnmsac_vv_nxv16i32_ta(<vscale x 16 x i32> %a, <vscal
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1823,10 +1609,8 @@ define <vscale x 16 x i32> @vnmsac_vx_nxv16i32_ta(<vscale x 16 x i32> %a, i32 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
-  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
   ret <vscale x 16 x i32> %u
 }
@@ -1843,10 +1627,8 @@ define <vscale x 1 x i64> @vnmsac_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1858,11 +1640,9 @@ define <vscale x 1 x i64> @vnmsac_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %allones, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
 
@@ -1890,10 +1670,8 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vsc
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1922,11 +1700,9 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
-  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %allones, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
 
@@ -1937,10 +1713,8 @@ define <vscale x 1 x i64> @vnmsac_vv_nxv1i64_ta(<vscale x 1 x i64> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v10, v8, v9, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1969,10 +1743,8 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
-  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
   ret <vscale x 1 x i64> %u
 }
@@ -1989,10 +1761,8 @@ define <vscale x 2 x i64> @vnmsac_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2004,11 +1774,9 @@ define <vscale x 2 x i64> @vnmsac_vv_nxv2i64_unmasked(<vscale x 2 x i64> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %allones, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
 
@@ -2036,10 +1804,8 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vsc
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2068,11 +1834,9 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
-  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %allones, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
 
@@ -2083,10 +1847,8 @@ define <vscale x 2 x i64> @vnmsac_vv_nxv2i64_ta(<vscale x 2 x i64> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2115,10 +1877,8 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
-  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
   ret <vscale x 2 x i64> %u
 }
@@ -2135,10 +1895,8 @@ define <vscale x 4 x i64> @vnmsac_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2150,11 +1908,9 @@ define <vscale x 4 x i64> @vnmsac_vv_nxv4i64_unmasked(<vscale x 4 x i64> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %allones, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
 
@@ -2182,10 +1938,8 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vsc
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2214,11 +1968,9 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
-  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %allones, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
 
@@ -2229,10 +1981,8 @@ define <vscale x 4 x i64> @vnmsac_vv_nxv4i64_ta(<vscale x 4 x i64> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2261,10 +2011,8 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
-  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
   ret <vscale x 4 x i64> %u
 }
@@ -2282,10 +2030,8 @@ define <vscale x 8 x i64> @vnmsac_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2298,11 +2044,9 @@ define <vscale x 8 x i64> @vnmsac_vv_nxv8i64_unmasked(<vscale x 8 x i64> %a, <vs
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
 
@@ -2330,10 +2074,8 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vsc
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2362,11 +2104,9 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
-  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
 
@@ -2378,10 +2118,8 @@ define <vscale x 8 x i64> @vnmsac_vv_nxv8i64_ta(<vscale x 8 x i64> %a, <vscale x
 ; CHECK-NEXT:    vnmsac.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v24
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
@@ -2410,10 +2148,8 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
-  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
+  %y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
   %u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
   ret <vscale x 8 x i64> %u
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
index 13b7a8e1991acc..7b97ca3c0090ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
@@ -119,9 +119,7 @@ define <vscale x 1 x i32> @vnsra_wi_i32_nxv1i32_sext(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vb = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %vb = sext <vscale x 1 x i32> splat (i32 15) to <vscale x 1 x i64>
   %x = ashr <vscale x 1 x i64> %va, %vb
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %y
@@ -192,9 +190,7 @@ define <vscale x 2 x i32> @vnsra_wi_i32_nxv2i32_sext(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v10, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vb = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %vb = sext <vscale x 2 x i32> splat (i32 15) to <vscale x 2 x i64>
   %x = ashr <vscale x 2 x i64> %va, %vb
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %y
@@ -265,9 +261,7 @@ define <vscale x 4 x i32> @vnsra_wi_i32_nxv4i32_sext(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v12, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = sext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %vb = sext <vscale x 4 x i32> splat (i32 15) to <vscale x 4 x i64>
   %x = ashr <vscale x 4 x i64> %va, %vb
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %y
@@ -338,9 +332,7 @@ define <vscale x 8 x i32> @vnsra_wi_i32_nxv8i32_sext(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v16, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vb = sext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %vb = sext <vscale x 8 x i32> splat (i32 15) to <vscale x 8 x i64>
   %x = ashr <vscale x 8 x i64> %va, %vb
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
@@ -406,9 +398,7 @@ define <vscale x 1 x i32> @vnsra_wi_i32_nxv1i32_zext(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vb = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %vb = zext <vscale x 1 x i32> splat (i32 15) to <vscale x 1 x i64>
   %x = ashr <vscale x 1 x i64> %va, %vb
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %y
@@ -479,9 +469,7 @@ define <vscale x 2 x i32> @vnsra_wi_i32_nxv2i32_zext(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v10, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vb = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %vb = zext <vscale x 2 x i32> splat (i32 15) to <vscale x 2 x i64>
   %x = ashr <vscale x 2 x i64> %va, %vb
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %y
@@ -552,9 +540,7 @@ define <vscale x 4 x i32> @vnsra_wi_i32_nxv4i32_zext(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v12, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %vb = zext <vscale x 4 x i32> splat (i32 15) to <vscale x 4 x i64>
   %x = ashr <vscale x 4 x i64> %va, %vb
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %y
@@ -625,9 +611,7 @@ define <vscale x 8 x i32> @vnsra_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v16, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vb = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %vb = zext <vscale x 8 x i32> splat (i32 15) to <vscale x 8 x i64>
   %x = ashr <vscale x 8 x i64> %va, %vb
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll
index 18f743482a5616..cb7a020d0b964f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-vp.ll
@@ -12,10 +12,8 @@ define <vscale x 1 x i16> @vsra_vv_nxv1i16(<vscale x 1 x i32> %a, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnsra.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> %allones, i32 %evl)
+  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %vr
 }
@@ -27,11 +25,9 @@ define <vscale x 1 x i16> @vsra_vv_nxv1i16_unmasked(<vscale x 1 x i32> %a, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnsra.wv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> %allones, i32 %evl)
-  %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> %allones, i32 %evl)
+  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x i16> %vr
 }
 
@@ -45,10 +41,8 @@ define <vscale x 1 x i32> @vsra_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnsra.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> %allones, i32 %evl)
+  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %vr
 }
@@ -59,10 +53,8 @@ define <vscale x 1 x i32> @vsra_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnsra.wv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> %allones, i32 %evl)
-  %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> %allones, i32 %evl)
+  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x i32> %vr
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
index 4c1e53cf9a01bb..2b912662b4b947 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
@@ -119,9 +119,7 @@ define <vscale x 1 x i32> @vnsrl_wi_i32_nxv1i32_sext(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vb = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %vb = sext <vscale x 1 x i32> splat (i32 15) to <vscale x 1 x i64>
   %x = lshr <vscale x 1 x i64> %va, %vb
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %y
@@ -192,9 +190,7 @@ define <vscale x 2 x i32> @vnsrl_wi_i32_nxv2i32_sext(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v10, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vb = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %vb = sext <vscale x 2 x i32> splat (i32 15) to <vscale x 2 x i64>
   %x = lshr <vscale x 2 x i64> %va, %vb
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %y
@@ -265,9 +261,7 @@ define <vscale x 4 x i32> @vnsrl_wi_i32_nxv4i32_sext(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v12, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = sext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %vb = sext <vscale x 4 x i32> splat (i32 15) to <vscale x 4 x i64>
   %x = lshr <vscale x 4 x i64> %va, %vb
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %y
@@ -338,9 +332,7 @@ define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_sext(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v16, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vb = sext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %vb = sext <vscale x 8 x i32> splat (i32 15) to <vscale x 8 x i64>
   %x = lshr <vscale x 8 x i64> %va, %vb
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
@@ -406,9 +398,7 @@ define <vscale x 1 x i32> @vnsrl_wi_i32_nxv1i32_zext(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vb = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %vb = zext <vscale x 1 x i32> splat (i32 15) to <vscale x 1 x i64>
   %x = lshr <vscale x 1 x i64> %va, %vb
   %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %y
@@ -479,9 +469,7 @@ define <vscale x 2 x i32> @vnsrl_wi_i32_nxv2i32_zext(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v10, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vb = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %vb = zext <vscale x 2 x i32> splat (i32 15) to <vscale x 2 x i64>
   %x = lshr <vscale x 2 x i64> %va, %vb
   %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %y
@@ -552,9 +540,7 @@ define <vscale x 4 x i32> @vnsrl_wi_i32_nxv4i32_zext(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v12, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vb = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %vb = zext <vscale x 4 x i32> splat (i32 15) to <vscale x 4 x i64>
   %x = lshr <vscale x 4 x i64> %va, %vb
   %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %y
@@ -625,9 +611,7 @@ define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vnsrl.wi v16, v8, 15
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vb = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %vb = zext <vscale x 8 x i32> splat (i32 15) to <vscale x 8 x i64>
   %x = lshr <vscale x 8 x i64> %va, %vb
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll
index 059d5bf3e095b8..e6e86011745b40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-vp.ll
@@ -12,10 +12,8 @@ define <vscale x 1 x i16> @vsra_vv_nxv1i16(<vscale x 1 x i32> %a, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnsrl.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> %allones, i32 %evl)
+  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %vr
 }
@@ -27,11 +25,9 @@ define <vscale x 1 x i16> @vsra_vv_nxv1i16_unmasked(<vscale x 1 x i32> %a, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnsrl.wv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> %allones, i32 %evl)
-  %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> %allones, i32 %evl)
+  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vr = call <vscale x 1 x i16> @llvm.vp.trunc.nxv1i16.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x i16> %vr
 }
 
@@ -45,10 +41,8 @@ define <vscale x 1 x i32> @vsra_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnsrl.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> %allones, i32 %evl)
+  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %vr
 }
@@ -59,10 +53,8 @@ define <vscale x 1 x i32> @vsra_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnsrl.wv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> %allones, i32 %evl)
-  %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> %allones, i32 %evl)
+  %bext = call <vscale x 1 x i64> @llvm.vp.sext.nxv1i64.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %vr = call <vscale x 1 x i32> @llvm.vp.trunc.nxv1i32.nxv1i64(<vscale x 1 x i64> %v, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
   ret <vscale x 1 x i32> %vr
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
index 356467698d9412..fbbd71cb35445f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
@@ -20,9 +20,7 @@ define <vscale x 1 x i8> @vor_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -12, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i8> %va, %splat
+  %vc = or <vscale x 1 x i8> %va, splat (i8 -12)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -32,9 +30,7 @@ define <vscale x 1 x i8> @vor_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i8> %va, %splat
+  %vc = or <vscale x 1 x i8> %va, splat (i8 15)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -45,9 +41,7 @@ define <vscale x 1 x i8> @vor_vx_nxv1i8_2(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i8> %va, %splat
+  %vc = or <vscale x 1 x i8> %va, splat (i8 16)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -69,9 +63,7 @@ define <vscale x 2 x i8> @vor_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -12, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i8> %va, %splat
+  %vc = or <vscale x 2 x i8> %va, splat (i8 -12)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -81,9 +73,7 @@ define <vscale x 2 x i8> @vor_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i8> %va, %splat
+  %vc = or <vscale x 2 x i8> %va, splat (i8 15)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -94,9 +84,7 @@ define <vscale x 2 x i8> @vor_vx_nxv2i8_2(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i8> %va, %splat
+  %vc = or <vscale x 2 x i8> %va, splat (i8 16)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -118,9 +106,7 @@ define <vscale x 4 x i8> @vor_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -12, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i8> %va, %splat
+  %vc = or <vscale x 4 x i8> %va, splat (i8 -12)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -130,9 +116,7 @@ define <vscale x 4 x i8> @vor_vx_nxv4i8_1(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i8> %va, %splat
+  %vc = or <vscale x 4 x i8> %va, splat (i8 15)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -143,9 +127,7 @@ define <vscale x 4 x i8> @vor_vx_nxv4i8_2(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i8> %va, %splat
+  %vc = or <vscale x 4 x i8> %va, splat (i8 16)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -167,9 +149,7 @@ define <vscale x 8 x i8> @vor_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -12, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i8> %va, %splat
+  %vc = or <vscale x 8 x i8> %va, splat (i8 -12)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -179,9 +159,7 @@ define <vscale x 8 x i8> @vor_vx_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i8> %va, %splat
+  %vc = or <vscale x 8 x i8> %va, splat (i8 15)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -192,9 +170,7 @@ define <vscale x 8 x i8> @vor_vx_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i8> %va, %splat
+  %vc = or <vscale x 8 x i8> %va, splat (i8 16)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -216,9 +192,7 @@ define <vscale x 16 x i8> @vor_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -12, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i8> %va, %splat
+  %vc = or <vscale x 16 x i8> %va, splat (i8 -12)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -228,9 +202,7 @@ define <vscale x 16 x i8> @vor_vx_nxv16i8_1(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i8> %va, %splat
+  %vc = or <vscale x 16 x i8> %va, splat (i8 15)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -241,9 +213,7 @@ define <vscale x 16 x i8> @vor_vx_nxv16i8_2(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i8> %va, %splat
+  %vc = or <vscale x 16 x i8> %va, splat (i8 16)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -265,9 +235,7 @@ define <vscale x 32 x i8> @vor_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -12, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = or <vscale x 32 x i8> %va, %splat
+  %vc = or <vscale x 32 x i8> %va, splat (i8 -12)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -277,9 +245,7 @@ define <vscale x 32 x i8> @vor_vx_nxv32i8_1(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = or <vscale x 32 x i8> %va, %splat
+  %vc = or <vscale x 32 x i8> %va, splat (i8 15)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -290,9 +256,7 @@ define <vscale x 32 x i8> @vor_vx_nxv32i8_2(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = or <vscale x 32 x i8> %va, %splat
+  %vc = or <vscale x 32 x i8> %va, splat (i8 16)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -314,9 +278,7 @@ define <vscale x 64 x i8> @vor_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -12, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = or <vscale x 64 x i8> %va, %splat
+  %vc = or <vscale x 64 x i8> %va, splat (i8 -12)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -326,9 +288,7 @@ define <vscale x 64 x i8> @vor_vx_nxv64i8_1(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 15, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = or <vscale x 64 x i8> %va, %splat
+  %vc = or <vscale x 64 x i8> %va, splat (i8 15)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -339,9 +299,7 @@ define <vscale x 64 x i8> @vor_vx_nxv64i8_2(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = or <vscale x 64 x i8> %va, %splat
+  %vc = or <vscale x 64 x i8> %va, splat (i8 16)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -363,9 +321,7 @@ define <vscale x 1 x i16> @vor_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -12, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i16> %va, %splat
+  %vc = or <vscale x 1 x i16> %va, splat (i16 -12)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -375,9 +331,7 @@ define <vscale x 1 x i16> @vor_vx_nxv1i16_1(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 15, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i16> %va, %splat
+  %vc = or <vscale x 1 x i16> %va, splat (i16 15)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -388,9 +342,7 @@ define <vscale x 1 x i16> @vor_vx_nxv1i16_2(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i16> %va, %splat
+  %vc = or <vscale x 1 x i16> %va, splat (i16 16)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -412,9 +364,7 @@ define <vscale x 2 x i16> @vor_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -12, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i16> %va, %splat
+  %vc = or <vscale x 2 x i16> %va, splat (i16 -12)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -424,9 +374,7 @@ define <vscale x 2 x i16> @vor_vx_nxv2i16_1(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 15, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i16> %va, %splat
+  %vc = or <vscale x 2 x i16> %va, splat (i16 15)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -437,9 +385,7 @@ define <vscale x 2 x i16> @vor_vx_nxv2i16_2(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i16> %va, %splat
+  %vc = or <vscale x 2 x i16> %va, splat (i16 16)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -461,9 +407,7 @@ define <vscale x 4 x i16> @vor_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -12, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i16> %va, %splat
+  %vc = or <vscale x 4 x i16> %va, splat (i16 -12)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -473,9 +417,7 @@ define <vscale x 4 x i16> @vor_vx_nxv4i16_1(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 15, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i16> %va, %splat
+  %vc = or <vscale x 4 x i16> %va, splat (i16 15)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -486,9 +428,7 @@ define <vscale x 4 x i16> @vor_vx_nxv4i16_2(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i16> %va, %splat
+  %vc = or <vscale x 4 x i16> %va, splat (i16 16)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -510,9 +450,7 @@ define <vscale x 8 x i16> @vor_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -12, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i16> %va, %splat
+  %vc = or <vscale x 8 x i16> %va, splat (i16 -12)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -522,9 +460,7 @@ define <vscale x 8 x i16> @vor_vx_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 15, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i16> %va, %splat
+  %vc = or <vscale x 8 x i16> %va, splat (i16 15)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -535,9 +471,7 @@ define <vscale x 8 x i16> @vor_vx_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i16> %va, %splat
+  %vc = or <vscale x 8 x i16> %va, splat (i16 16)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -559,9 +493,7 @@ define <vscale x 16 x i16> @vor_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -12, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i16> %va, %splat
+  %vc = or <vscale x 16 x i16> %va, splat (i16 -12)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -571,9 +503,7 @@ define <vscale x 16 x i16> @vor_vx_nxv16i16_1(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 15, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i16> %va, %splat
+  %vc = or <vscale x 16 x i16> %va, splat (i16 15)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -584,9 +514,7 @@ define <vscale x 16 x i16> @vor_vx_nxv16i16_2(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i16> %va, %splat
+  %vc = or <vscale x 16 x i16> %va, splat (i16 16)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -608,9 +536,7 @@ define <vscale x 32 x i16> @vor_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -12, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = or <vscale x 32 x i16> %va, %splat
+  %vc = or <vscale x 32 x i16> %va, splat (i16 -12)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -620,9 +546,7 @@ define <vscale x 32 x i16> @vor_vx_nxv32i16_1(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 15, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = or <vscale x 32 x i16> %va, %splat
+  %vc = or <vscale x 32 x i16> %va, splat (i16 15)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -633,9 +557,7 @@ define <vscale x 32 x i16> @vor_vx_nxv32i16_2(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = or <vscale x 32 x i16> %va, %splat
+  %vc = or <vscale x 32 x i16> %va, splat (i16 16)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -657,9 +579,7 @@ define <vscale x 1 x i32> @vor_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -12, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i32> %va, %splat
+  %vc = or <vscale x 1 x i32> %va, splat (i32 -12)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -669,9 +589,7 @@ define <vscale x 1 x i32> @vor_vx_nxv1i32_1(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i32> %va, %splat
+  %vc = or <vscale x 1 x i32> %va, splat (i32 15)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -682,9 +600,7 @@ define <vscale x 1 x i32> @vor_vx_nxv1i32_2(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i32> %va, %splat
+  %vc = or <vscale x 1 x i32> %va, splat (i32 16)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -706,9 +622,7 @@ define <vscale x 2 x i32> @vor_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -12, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i32> %va, %splat
+  %vc = or <vscale x 2 x i32> %va, splat (i32 -12)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -718,9 +632,7 @@ define <vscale x 2 x i32> @vor_vx_nxv2i32_1(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i32> %va, %splat
+  %vc = or <vscale x 2 x i32> %va, splat (i32 15)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -731,9 +643,7 @@ define <vscale x 2 x i32> @vor_vx_nxv2i32_2(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i32> %va, %splat
+  %vc = or <vscale x 2 x i32> %va, splat (i32 16)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -755,9 +665,7 @@ define <vscale x 4 x i32> @vor_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -12, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i32> %va, %splat
+  %vc = or <vscale x 4 x i32> %va, splat (i32 -12)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -767,9 +675,7 @@ define <vscale x 4 x i32> @vor_vx_nxv4i32_1(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i32> %va, %splat
+  %vc = or <vscale x 4 x i32> %va, splat (i32 15)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -780,9 +686,7 @@ define <vscale x 4 x i32> @vor_vx_nxv4i32_2(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i32> %va, %splat
+  %vc = or <vscale x 4 x i32> %va, splat (i32 16)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -804,9 +708,7 @@ define <vscale x 8 x i32> @vor_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -12, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i32> %va, %splat
+  %vc = or <vscale x 8 x i32> %va, splat (i32 -12)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -816,9 +718,7 @@ define <vscale x 8 x i32> @vor_vx_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i32> %va, %splat
+  %vc = or <vscale x 8 x i32> %va, splat (i32 15)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -829,9 +729,7 @@ define <vscale x 8 x i32> @vor_vx_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i32> %va, %splat
+  %vc = or <vscale x 8 x i32> %va, splat (i32 16)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -853,9 +751,7 @@ define <vscale x 16 x i32> @vor_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -12, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i32> %va, %splat
+  %vc = or <vscale x 16 x i32> %va, splat (i32 -12)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -865,9 +761,7 @@ define <vscale x 16 x i32> @vor_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 15, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i32> %va, %splat
+  %vc = or <vscale x 16 x i32> %va, splat (i32 15)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -878,9 +772,7 @@ define <vscale x 16 x i32> @vor_vx_nxv16i32_2(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = or <vscale x 16 x i32> %va, %splat
+  %vc = or <vscale x 16 x i32> %va, splat (i32 16)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -915,9 +807,7 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -12, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i64> %va, %splat
+  %vc = or <vscale x 1 x i64> %va, splat (i64 -12)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -927,9 +817,7 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 15, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i64> %va, %splat
+  %vc = or <vscale x 1 x i64> %va, splat (i64 15)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -940,9 +828,7 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64_2(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = or <vscale x 1 x i64> %va, %splat
+  %vc = or <vscale x 1 x i64> %va, splat (i64 16)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -977,9 +863,7 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -12, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i64> %va, %splat
+  %vc = or <vscale x 2 x i64> %va, splat (i64 -12)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -989,9 +873,7 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 15, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i64> %va, %splat
+  %vc = or <vscale x 2 x i64> %va, splat (i64 15)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1002,9 +884,7 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64_2(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = or <vscale x 2 x i64> %va, %splat
+  %vc = or <vscale x 2 x i64> %va, splat (i64 16)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1039,9 +919,7 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -12, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i64> %va, %splat
+  %vc = or <vscale x 4 x i64> %va, splat (i64 -12)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1051,9 +929,7 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 15, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i64> %va, %splat
+  %vc = or <vscale x 4 x i64> %va, splat (i64 15)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1064,9 +940,7 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64_2(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = or <vscale x 4 x i64> %va, %splat
+  %vc = or <vscale x 4 x i64> %va, splat (i64 16)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1101,9 +975,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, -12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -12, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i64> %va, %splat
+  %vc = or <vscale x 8 x i64> %va, splat (i64 -12)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1113,9 +985,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 15, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i64> %va, %splat
+  %vc = or <vscale x 8 x i64> %va, splat (i64 15)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1126,9 +996,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i64> %va, %splat
+  %vc = or <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1139,9 +1007,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_3(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = or <vscale x 8 x i64> %va, %splat
+  %vc = or <vscale x 8 x i64> %va, splat (i64 -1)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1206,9 +1072,7 @@ define <vscale x 8 x i32> @vor_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> zeroinitializer
   %vc = or <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
index ccab5e40d450ff..b9388e58797048 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
@@ -36,9 +36,7 @@ define <vscale x 1 x i8> @vor_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -62,9 +60,7 @@ define <vscale x 1 x i8> @vor_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -74,9 +70,7 @@ define <vscale x 1 x i8> @vor_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 5), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -86,11 +80,7 @@ define <vscale x 1 x i8> @vor_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 5), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -112,9 +102,7 @@ define <vscale x 2 x i8> @vor_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -138,9 +126,7 @@ define <vscale x 2 x i8> @vor_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -150,9 +136,7 @@ define <vscale x 2 x i8> @vor_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 5), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -162,11 +146,7 @@ define <vscale x 2 x i8> @vor_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 5), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -188,9 +168,7 @@ define <vscale x 4 x i8> @vor_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -214,9 +192,7 @@ define <vscale x 4 x i8> @vor_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -226,9 +202,7 @@ define <vscale x 4 x i8> @vor_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 5), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -238,11 +212,7 @@ define <vscale x 4 x i8> @vor_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 5), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -264,9 +234,7 @@ define <vscale x 8 x i8> @vor_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -290,9 +258,7 @@ define <vscale x 8 x i8> @vor_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -302,9 +268,7 @@ define <vscale x 8 x i8> @vor_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 5), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -314,11 +278,7 @@ define <vscale x 8 x i8> @vor_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 5), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -340,9 +300,7 @@ define <vscale x 16 x i8> @vor_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -366,9 +324,7 @@ define <vscale x 16 x i8> @vor_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -378,9 +334,7 @@ define <vscale x 16 x i8> @vor_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 5), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -390,11 +344,7 @@ define <vscale x 16 x i8> @vor_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 5), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -416,9 +366,7 @@ define <vscale x 32 x i8> @vor_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -442,9 +390,7 @@ define <vscale x 32 x i8> @vor_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -454,9 +400,7 @@ define <vscale x 32 x i8> @vor_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 5), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -466,11 +410,7 @@ define <vscale x 32 x i8> @vor_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 5), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -492,9 +432,7 @@ define <vscale x 64 x i8> @vor_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -518,9 +456,7 @@ define <vscale x 64 x i8> @vor_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -530,9 +466,7 @@ define <vscale x 64 x i8> @vor_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 5), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -542,11 +476,7 @@ define <vscale x 64 x i8> @vor_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 5), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -568,9 +498,7 @@ define <vscale x 1 x i16> @vor_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -594,9 +522,7 @@ define <vscale x 1 x i16> @vor_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -606,9 +532,7 @@ define <vscale x 1 x i16> @vor_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 5), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -618,11 +542,7 @@ define <vscale x 1 x i16> @vor_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 5), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -644,9 +564,7 @@ define <vscale x 2 x i16> @vor_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -670,9 +588,7 @@ define <vscale x 2 x i16> @vor_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -682,9 +598,7 @@ define <vscale x 2 x i16> @vor_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 5), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -694,11 +608,7 @@ define <vscale x 2 x i16> @vor_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 5), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -720,9 +630,7 @@ define <vscale x 4 x i16> @vor_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -746,9 +654,7 @@ define <vscale x 4 x i16> @vor_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -758,9 +664,7 @@ define <vscale x 4 x i16> @vor_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 5), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -770,11 +674,7 @@ define <vscale x 4 x i16> @vor_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 5), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -796,9 +696,7 @@ define <vscale x 8 x i16> @vor_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -822,9 +720,7 @@ define <vscale x 8 x i16> @vor_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -834,9 +730,7 @@ define <vscale x 8 x i16> @vor_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 5), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -846,11 +740,7 @@ define <vscale x 8 x i16> @vor_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 5), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -872,9 +762,7 @@ define <vscale x 16 x i16> @vor_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -898,9 +786,7 @@ define <vscale x 16 x i16> @vor_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -910,9 +796,7 @@ define <vscale x 16 x i16> @vor_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 5), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -922,11 +806,7 @@ define <vscale x 16 x i16> @vor_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 5), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -948,9 +828,7 @@ define <vscale x 32 x i16> @vor_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -974,9 +852,7 @@ define <vscale x 32 x i16> @vor_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -986,9 +862,7 @@ define <vscale x 32 x i16> @vor_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 5), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -998,11 +872,7 @@ define <vscale x 32 x i16> @vor_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 5), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1024,9 +894,7 @@ define <vscale x 1 x i32> @vor_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1050,9 +918,7 @@ define <vscale x 1 x i32> @vor_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1062,9 +928,7 @@ define <vscale x 1 x i32> @vor_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 5), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1074,11 +938,7 @@ define <vscale x 1 x i32> @vor_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 5), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1100,9 +960,7 @@ define <vscale x 2 x i32> @vor_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1138,9 +996,7 @@ define <vscale x 2 x i32> @vor_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1152,9 +1008,7 @@ define <vscale x 2 x i32> @vor_vx_nxv2i32_unmasked_commute(<vscale x 2 x i32> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1164,9 +1018,7 @@ define <vscale x 2 x i32> @vor_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 5), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1176,11 +1028,7 @@ define <vscale x 2 x i32> @vor_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 5), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1202,9 +1050,7 @@ define <vscale x 4 x i32> @vor_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1228,9 +1074,7 @@ define <vscale x 4 x i32> @vor_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1240,9 +1084,7 @@ define <vscale x 4 x i32> @vor_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 5), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1252,11 +1094,7 @@ define <vscale x 4 x i32> @vor_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 5), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1278,9 +1116,7 @@ define <vscale x 8 x i32> @vor_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1304,9 +1140,7 @@ define <vscale x 8 x i32> @vor_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1316,9 +1150,7 @@ define <vscale x 8 x i32> @vor_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 5), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1328,11 +1160,7 @@ define <vscale x 8 x i32> @vor_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 5), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1354,9 +1182,7 @@ define <vscale x 10 x i32> @vor_vv_nxv10i32_unmasked(<vscale x 10 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 10 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> poison, <vscale x 10 x i32> zeroinitializer
-  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 %evl)
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 10 x i32> %v
 }
 
@@ -1380,9 +1206,7 @@ define <vscale x 10 x i32> @vor_vx_nxv10i32_unmasked(<vscale x 10 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 10 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> poison, <vscale x 10 x i32> zeroinitializer
-  %head = insertelement <vscale x 10 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> poison, <vscale x 10 x i32> zeroinitializer
-  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 10 x i32> %v
 }
 
@@ -1392,9 +1216,7 @@ define <vscale x 10 x i32> @vor_vi_nxv10i32(<vscale x 10 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 10 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> poison, <vscale x 10 x i32> zeroinitializer
-  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> splat (i32 5), <vscale x 10 x i1> %m, i32 %evl)
   ret <vscale x 10 x i32> %v
 }
 
@@ -1404,11 +1226,7 @@ define <vscale x 10 x i32> @vor_vi_nxv10i32_unmasked(<vscale x 10 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 10 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> poison, <vscale x 10 x i32> zeroinitializer
-  %head = insertelement <vscale x 10 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> poison, <vscale x 10 x i32> zeroinitializer
-  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> splat (i32 5), <vscale x 10 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 10 x i32> %v
 }
 
@@ -1430,9 +1248,7 @@ define <vscale x 16 x i32> @vor_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1456,9 +1272,7 @@ define <vscale x 16 x i32> @vor_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1468,9 +1282,7 @@ define <vscale x 16 x i32> @vor_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 5), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1480,11 +1292,7 @@ define <vscale x 16 x i32> @vor_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 5), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1506,9 +1314,7 @@ define <vscale x 1 x i64> @vor_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1560,9 +1366,7 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1572,9 +1376,7 @@ define <vscale x 1 x i64> @vor_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 5), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1584,11 +1386,7 @@ define <vscale x 1 x i64> @vor_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 5), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1610,9 +1408,7 @@ define <vscale x 2 x i64> @vor_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1664,9 +1460,7 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1676,9 +1470,7 @@ define <vscale x 2 x i64> @vor_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 5), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1688,11 +1480,7 @@ define <vscale x 2 x i64> @vor_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 5), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1714,9 +1502,7 @@ define <vscale x 4 x i64> @vor_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1768,9 +1554,7 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1780,9 +1564,7 @@ define <vscale x 4 x i64> @vor_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 5), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1792,11 +1574,7 @@ define <vscale x 4 x i64> @vor_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 5), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1818,9 +1596,7 @@ define <vscale x 8 x i64> @vor_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1872,9 +1648,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1884,9 +1658,7 @@ define <vscale x 8 x i64> @vor_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 5), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1896,10 +1668,6 @@ define <vscale x 8 x i64> @vor_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vor.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 5), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float-fixed-vectors.ll
index cc13a97ddce0e8..136f6e7bc99909 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float-fixed-vectors.ll
@@ -26,10 +26,8 @@ define <2 x double> @test_vp_reverse_v2f64(<2 x double> %src, i32 zeroext %evl)
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %dst = call <2 x double> @llvm.experimental.vp.reverse.v2f64(<2 x double> %src, <2 x i1> %allones, i32 %evl)
+  %dst = call <2 x double> @llvm.experimental.vp.reverse.v2f64(<2 x double> %src, <2 x i1> splat (i1 1), i32 %evl)
   ret <2 x double> %dst
 }
 
@@ -57,10 +55,8 @@ define <4 x float> @test_vp_reverse_v4f32(<4 x float> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %dst = call <4 x float> @llvm.experimental.vp.reverse.v4f32(<4 x float> %src, <4 x i1> %allones, i32 %evl)
+  %dst = call <4 x float> @llvm.experimental.vp.reverse.v4f32(<4 x float> %src, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x float> %dst
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll
index adc1ca6c858683..b235990ab5dd09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll
@@ -25,10 +25,8 @@ define <vscale x 1 x double> @test_vp_reverse_nxv1f64(<vscale x 1 x double> %src
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
 
-  %dst = call <vscale x 1 x double> @llvm.experimental.vp.reverse.nxv1f64(<vscale x 1 x double> %src, <vscale x 1 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 1 x double> @llvm.experimental.vp.reverse.nxv1f64(<vscale x 1 x double> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 1 x double> %dst
 }
 
@@ -56,10 +54,8 @@ define <vscale x 2 x float> @test_vp_reverse_nxv2f32(<vscale x 2 x float> %src,
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
 
-  %dst = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %src, <vscale x 2 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 2 x float> %dst
 }
 
@@ -87,10 +83,8 @@ define <vscale x 2 x double> @test_vp_reverse_nxv2f64(<vscale x 2 x double> %src
 ; CHECK-NEXT:    vrgather.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
 
-  %dst = call <vscale x 2 x double> @llvm.experimental.vp.reverse.nxv2f64(<vscale x 2 x double> %src, <vscale x 2 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 2 x double> @llvm.experimental.vp.reverse.nxv2f64(<vscale x 2 x double> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 2 x double> %dst
 }
 
@@ -118,10 +112,8 @@ define <vscale x 4 x float> @test_vp_reverse_nxv4f32(<vscale x 4 x float> %src,
 ; CHECK-NEXT:    vrgather.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 
-  %dst = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> %src, <vscale x 4 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x float> %dst
 }
 
@@ -149,10 +141,8 @@ define <vscale x 4 x double> @test_vp_reverse_nxv4f64(<vscale x 4 x double> %src
 ; CHECK-NEXT:    vrgather.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 
-  %dst = call <vscale x 4 x double> @llvm.experimental.vp.reverse.nxv4f64(<vscale x 4 x double> %src, <vscale x 4 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 4 x double> @llvm.experimental.vp.reverse.nxv4f64(<vscale x 4 x double> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x double> %dst
 }
 
@@ -180,10 +170,8 @@ define <vscale x 8 x float> @test_vp_reverse_nxv8f32(<vscale x 8 x float> %src,
 ; CHECK-NEXT:    vrgather.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %dst = call <vscale x 8 x float> @llvm.experimental.vp.reverse.nxv8f32(<vscale x 8 x float> %src, <vscale x 8 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 8 x float> @llvm.experimental.vp.reverse.nxv8f32(<vscale x 8 x float> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 8 x float> %dst
 }
 
@@ -211,10 +199,8 @@ define <vscale x 8 x double> @test_vp_reverse_nxv8f64(<vscale x 8 x double> %src
 ; CHECK-NEXT:    vrgather.vv v16, v8, v24
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %dst = call <vscale x 8 x double> @llvm.experimental.vp.reverse.nxv8f64(<vscale x 8 x double> %src, <vscale x 8 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 8 x double> @llvm.experimental.vp.reverse.nxv8f64(<vscale x 8 x double> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 8 x double> %dst
 }
 
@@ -242,10 +228,8 @@ define <vscale x 16 x float> @test_vp_reverse_nxv16f32(<vscale x 16 x float> %sr
 ; CHECK-NEXT:    vrgather.vv v16, v8, v24
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
 
-  %dst = call <vscale x 16 x float> @llvm.experimental.vp.reverse.nxv16f32(<vscale x 16 x float> %src, <vscale x 16 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 16 x float> @llvm.experimental.vp.reverse.nxv16f32(<vscale x 16 x float> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 16 x float> %dst
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int-fixed-vectors.ll
index d7fc8838f430d1..27f16f0285e12f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int-fixed-vectors.ll
@@ -26,10 +26,8 @@ define <2 x i64> @test_vp_reverse_v2i64(<2 x i64> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %dst = call <2 x i64> @llvm.experimental.vp.reverse.v2i64(<2 x i64> %src, <2 x i1> %allones, i32 %evl)
+  %dst = call <2 x i64> @llvm.experimental.vp.reverse.v2i64(<2 x i64> %src, <2 x i1> splat (i1 1), i32 %evl)
   ret <2 x i64> %dst
 }
 
@@ -57,10 +55,8 @@ define <4 x i32> @test_vp_reverse_v4i32(<4 x i32> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %dst = call <4 x i32> @llvm.experimental.vp.reverse.v4i32(<4 x i32> %src, <4 x i1> %allones, i32 %evl)
+  %dst = call <4 x i32> @llvm.experimental.vp.reverse.v4i32(<4 x i32> %src, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x i32> %dst
 }
 
@@ -88,10 +84,8 @@ define <8 x i16> @test_vp_reverse_v8i16(<8 x i16> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
 
-  %dst = call <8 x i16> @llvm.experimental.vp.reverse.v8i16(<8 x i16> %src, <8 x i1> %allones, i32 %evl)
+  %dst = call <8 x i16> @llvm.experimental.vp.reverse.v8i16(<8 x i16> %src, <8 x i1> splat (i1 1), i32 %evl)
   ret <8 x i16> %dst
 }
 
@@ -121,10 +115,8 @@ define <16 x i8> @test_vp_reverse_v16i8(<16 x i8> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgatherei16.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
 
-  %dst = call <16 x i8> @llvm.experimental.vp.reverse.v16i8(<16 x i8> %src, <16 x i1> %allones, i32 %evl)
+  %dst = call <16 x i8> @llvm.experimental.vp.reverse.v16i8(<16 x i8> %src, <16 x i1> splat (i1 1), i32 %evl)
   ret <16 x i8> %dst
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
index 47df1b005a0f85..8b1660283cb7d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
@@ -25,10 +25,8 @@ define <vscale x 1 x i64> @test_vp_reverse_nxv1i64(<vscale x 1 x i64> %src, i32
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
 
-  %dst = call <vscale x 1 x i64> @llvm.experimental.vp.reverse.nxv1i64(<vscale x 1 x i64> %src, <vscale x 1 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 1 x i64> @llvm.experimental.vp.reverse.nxv1i64(<vscale x 1 x i64> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 1 x i64> %dst
 }
 
@@ -56,10 +54,8 @@ define <vscale x 2 x i32> @test_vp_reverse_nxv2i32(<vscale x 2 x i32> %src, i32
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
 
-  %dst = call <vscale x 2 x i32> @llvm.experimental.vp.reverse.nxv2i32(<vscale x 2 x i32> %src, <vscale x 2 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 2 x i32> @llvm.experimental.vp.reverse.nxv2i32(<vscale x 2 x i32> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 2 x i32> %dst
 }
 
@@ -87,10 +83,8 @@ define <vscale x 4 x i16> @test_vp_reverse_nxv4i16(<vscale x 4 x i16> %src, i32
 ; CHECK-NEXT:    vrgather.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 
-  %dst = call <vscale x 4 x i16> @llvm.experimental.vp.reverse.nxv4i16(<vscale x 4 x i16> %src, <vscale x 4 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 4 x i16> @llvm.experimental.vp.reverse.nxv4i16(<vscale x 4 x i16> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x i16> %dst
 }
 
@@ -120,10 +114,8 @@ define <vscale x 8 x i8> @test_vp_reverse_nxv8i8(<vscale x 8 x i8> %src, i32 zer
 ; CHECK-NEXT:    vrgatherei16.vv v9, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %dst = call <vscale x 8 x i8> @llvm.experimental.vp.reverse.nxv8i8(<vscale x 8 x i8> %src, <vscale x 8 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 8 x i8> @llvm.experimental.vp.reverse.nxv8i8(<vscale x 8 x i8> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 8 x i8> %dst
 }
 
@@ -151,10 +143,8 @@ define <vscale x 2 x i64> @test_vp_reverse_nxv2i64(<vscale x 2 x i64> %src, i32
 ; CHECK-NEXT:    vrgather.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
 
-  %dst = call <vscale x 2 x i64> @llvm.experimental.vp.reverse.nxv2i64(<vscale x 2 x i64> %src, <vscale x 2 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 2 x i64> @llvm.experimental.vp.reverse.nxv2i64(<vscale x 2 x i64> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 2 x i64> %dst
 }
 
@@ -182,10 +172,8 @@ define <vscale x 4 x i32> @test_vp_reverse_nxv4i32(<vscale x 4 x i32> %src, i32
 ; CHECK-NEXT:    vrgather.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 
-  %dst = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> %src, <vscale x 4 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x i32> %dst
 }
 
@@ -213,10 +201,8 @@ define <vscale x 8 x i16> @test_vp_reverse_nxv8i16(<vscale x 8 x i16> %src, i32
 ; CHECK-NEXT:    vrgather.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %dst = call <vscale x 8 x i16> @llvm.experimental.vp.reverse.nxv8i16(<vscale x 8 x i16> %src, <vscale x 8 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 8 x i16> @llvm.experimental.vp.reverse.nxv8i16(<vscale x 8 x i16> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 8 x i16> %dst
 }
 
@@ -246,10 +232,8 @@ define <vscale x 16 x i8> @test_vp_reverse_nxv16i8(<vscale x 16 x i8> %src, i32
 ; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
 
-  %dst = call <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8> %src, <vscale x 16 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 16 x i8> %dst
 }
 
@@ -277,10 +261,8 @@ define <vscale x 4 x i64> @test_vp_reverse_nxv4i64(<vscale x 4 x i64> %src, i32
 ; CHECK-NEXT:    vrgather.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 
-  %dst = call <vscale x 4 x i64> @llvm.experimental.vp.reverse.nxv4i64(<vscale x 4 x i64> %src, <vscale x 4 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 4 x i64> @llvm.experimental.vp.reverse.nxv4i64(<vscale x 4 x i64> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x i64> %dst
 }
 
@@ -308,10 +290,8 @@ define <vscale x 8 x i32> @test_vp_reverse_nxv8i32(<vscale x 8 x i32> %src, i32
 ; CHECK-NEXT:    vrgather.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %dst = call <vscale x 8 x i32> @llvm.experimental.vp.reverse.nxv8i32(<vscale x 8 x i32> %src, <vscale x 8 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 8 x i32> @llvm.experimental.vp.reverse.nxv8i32(<vscale x 8 x i32> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 8 x i32> %dst
 }
 
@@ -339,10 +319,8 @@ define <vscale x 16 x i16> @test_vp_reverse_nxv16i16(<vscale x 16 x i16> %src, i
 ; CHECK-NEXT:    vrgather.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
 
-  %dst = call <vscale x 16 x i16> @llvm.experimental.vp.reverse.nxv16i16(<vscale x 16 x i16> %src, <vscale x 16 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 16 x i16> @llvm.experimental.vp.reverse.nxv16i16(<vscale x 16 x i16> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 16 x i16> %dst
 }
 
@@ -372,10 +350,8 @@ define <vscale x 32 x i8> @test_vp_reverse_nxv32i8(<vscale x 32 x i8> %src, i32
 ; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
 
-  %dst = call <vscale x 32 x i8> @llvm.experimental.vp.reverse.nxv32i8(<vscale x 32 x i8> %src, <vscale x 32 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 32 x i8> @llvm.experimental.vp.reverse.nxv32i8(<vscale x 32 x i8> %src, <vscale x 32 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 32 x i8> %dst
 }
 
@@ -403,10 +379,8 @@ define <vscale x 8 x i64> @test_vp_reverse_nxv8i64(<vscale x 8 x i64> %src, i32
 ; CHECK-NEXT:    vrgather.vv v16, v8, v24
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %dst = call <vscale x 8 x i64> @llvm.experimental.vp.reverse.nxv8i64(<vscale x 8 x i64> %src, <vscale x 8 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 8 x i64> @llvm.experimental.vp.reverse.nxv8i64(<vscale x 8 x i64> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 8 x i64> %dst
 }
 
@@ -434,10 +408,8 @@ define <vscale x 16 x i32> @test_vp_reverse_nxv16i32(<vscale x 16 x i32> %src, i
 ; CHECK-NEXT:    vrgather.vv v16, v8, v24
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
 
-  %dst = call <vscale x 16 x i32> @llvm.experimental.vp.reverse.nxv16i32(<vscale x 16 x i32> %src, <vscale x 16 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 16 x i32> @llvm.experimental.vp.reverse.nxv16i32(<vscale x 16 x i32> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 16 x i32> %dst
 }
 
@@ -465,10 +437,8 @@ define <vscale x 32 x i16> @test_vp_reverse_nxv32i16(<vscale x 32 x i16> %src, i
 ; CHECK-NEXT:    vrgather.vv v16, v8, v24
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
 
-  %dst = call <vscale x 32 x i16> @llvm.experimental.vp.reverse.nxv32i16(<vscale x 32 x i16> %src, <vscale x 32 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 32 x i16> @llvm.experimental.vp.reverse.nxv32i16(<vscale x 32 x i16> %src, <vscale x 32 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 32 x i16> %dst
 }
 
@@ -510,10 +480,8 @@ define <vscale x 64 x i8> @test_vp_reverse_nxv64i8(<vscale x 64 x i8> %src, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v24, a1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
 
-  %dst = call <vscale x 64 x i8> @llvm.experimental.vp.reverse.nxv64i8(<vscale x 64 x i8> %src, <vscale x 64 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 64 x i8> @llvm.experimental.vp.reverse.nxv64i8(<vscale x 64 x i8> %src, <vscale x 64 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 64 x i8> %dst
 }
 
@@ -561,10 +529,8 @@ define <vscale x 128 x i8> @test_vp_reverse_nxv128i8(<vscale x 128 x i8> %src, i
 ; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 80
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 128 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> undef, <vscale x 128 x i32> zeroinitializer
 
-  %dst = call <vscale x 128 x i8> @llvm.experimental.vp.reverse.nxv128i8(<vscale x 128 x i8> %src, <vscale x 128 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 128 x i8> @llvm.experimental.vp.reverse.nxv128i8(<vscale x 128 x i8> %src, <vscale x 128 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 128 x i8> %dst
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
index fd608c858650e3..a30ebf2d33b502 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
@@ -34,10 +34,8 @@ define <2 x i1> @test_vp_reverse_v2i1(<2 x i1> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %dst = call <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1> %src, <2 x i1> %allones, i32 %evl)
+  %dst = call <2 x i1> @llvm.experimental.vp.reverse.v2i1(<2 x i1> %src, <2 x i1> splat (i1 1), i32 %evl)
   ret <2 x i1> %dst
 }
 
@@ -73,10 +71,8 @@ define <4 x i1> @test_vp_reverse_v4i1(<4 x i1> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %dst = call <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1> %src, <4 x i1> %allones, i32 %evl)
+  %dst = call <4 x i1> @llvm.experimental.vp.reverse.v4i1(<4 x i1> %src, <4 x i1> splat (i1 1), i32 %evl)
   ret <4 x i1> %dst
 }
 
@@ -112,10 +108,8 @@ define <8 x i1> @test_vp_reverse_v8i1(<8 x i1> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
 
-  %dst = call <8 x i1> @llvm.experimental.vp.reverse.v8i1(<8 x i1> %src, <8 x i1> %allones, i32 %evl)
+  %dst = call <8 x i1> @llvm.experimental.vp.reverse.v8i1(<8 x i1> %src, <8 x i1> splat (i1 1), i32 %evl)
   ret <8 x i1> %dst
 }
 
@@ -151,10 +145,8 @@ define <16 x i1> @test_vp_reverse_v16i1(<16 x i1> %src, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrgatherei16.vv v11, v10, v8
 ; CHECK-NEXT:    vmsne.vi v0, v11, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
 
-  %dst = call <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1> %src, <16 x i1> %allones, i32 %evl)
+  %dst = call <16 x i1> @llvm.experimental.vp.reverse.v16i1(<16 x i1> %src, <16 x i1> splat (i1 1), i32 %evl)
   ret <16 x i1> %dst
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
index 29917141fffed8..ceb6a164e20df6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
@@ -33,10 +33,8 @@ define <vscale x 1 x i1> @test_vp_reverse_nxv1i1(<vscale x 1 x i1> %src, i32 zer
 ; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
 
-  %dst = call <vscale x 1 x i1> @llvm.experimental.vp.reverse.nxv1i1(<vscale x 1 x i1> %src, <vscale x 1 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 1 x i1> @llvm.experimental.vp.reverse.nxv1i1(<vscale x 1 x i1> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 1 x i1> %dst
 }
 
@@ -72,10 +70,8 @@ define <vscale x 2 x i1> @test_vp_reverse_nxv2i1(<vscale x 2 x i1> %src, i32 zer
 ; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
 
-  %dst = call <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1> %src, <vscale x 2 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 2 x i1> %dst
 }
 
@@ -111,10 +107,8 @@ define <vscale x 4 x i1> @test_vp_reverse_nxv4i1(<vscale x 4 x i1> %src, i32 zer
 ; CHECK-NEXT:    vrgatherei16.vv v10, v9, v8
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 
-  %dst = call <vscale x 4 x i1> @llvm.experimental.vp.reverse.nxv4i1(<vscale x 4 x i1> %src, <vscale x 4 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 4 x i1> @llvm.experimental.vp.reverse.nxv4i1(<vscale x 4 x i1> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x i1> %dst
 }
 
@@ -150,10 +144,8 @@ define <vscale x 8 x i1> @test_vp_reverse_nxv8i1(<vscale x 8 x i1> %src, i32 zer
 ; CHECK-NEXT:    vrgatherei16.vv v11, v10, v8
 ; CHECK-NEXT:    vmsne.vi v0, v11, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %dst = call <vscale x 8 x i1> @llvm.experimental.vp.reverse.nxv8i1(<vscale x 8 x i1> %src, <vscale x 8 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 8 x i1> @llvm.experimental.vp.reverse.nxv8i1(<vscale x 8 x i1> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 8 x i1> %dst
 }
 
@@ -190,10 +182,8 @@ define <vscale x 16 x i1> @test_vp_reverse_nxv16i1(<vscale x 16 x i1> %src, i32
 ; CHECK-NEXT:    vrgatherei16.vv v14, v12, v8
 ; CHECK-NEXT:    vmsne.vi v0, v14, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
 
-  %dst = call <vscale x 16 x i1> @llvm.experimental.vp.reverse.nxv16i1(<vscale x 16 x i1> %src, <vscale x 16 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 16 x i1> @llvm.experimental.vp.reverse.nxv16i1(<vscale x 16 x i1> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 16 x i1> %dst
 }
 
@@ -230,10 +220,8 @@ define <vscale x 32 x i1> @test_vp_reverse_nxv32i1(<vscale x 32 x i1> %src, i32
 ; CHECK-NEXT:    vrgatherei16.vv v20, v16, v8
 ; CHECK-NEXT:    vmsne.vi v0, v20, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
 
-  %dst = call <vscale x 32 x i1> @llvm.experimental.vp.reverse.nxv32i1(<vscale x 32 x i1> %src, <vscale x 32 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 32 x i1> @llvm.experimental.vp.reverse.nxv32i1(<vscale x 32 x i1> %src, <vscale x 32 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 32 x i1> %dst
 }
 
@@ -285,10 +273,8 @@ define <vscale x 64 x i1> @test_vp_reverse_nxv64i1(<vscale x 64 x i1> %src, i32
 ; CHECK-NEXT:    vslidedown.vx v8, v16, a1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
 
-  %dst = call <vscale x 64 x i1> @llvm.experimental.vp.reverse.nxv64i1(<vscale x 64 x i1> %src, <vscale x 64 x i1> %allones, i32 %evl)
+  %dst = call <vscale x 64 x i1> @llvm.experimental.vp.reverse.nxv64i1(<vscale x 64 x i1> %src, <vscale x 64 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 64 x i1> %dst
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-fixed-vectors.ll
index f7c8c251e197be..494bf46050ccb9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-fixed-vectors.ll
@@ -19,10 +19,8 @@ define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i64> %v
 }
 
@@ -35,10 +33,8 @@ define <2 x i64> @test_vp_splice_v2i64_negative_offset(<2 x i64> %va, <2 x i64>
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i64> %v
 }
 
@@ -64,10 +60,8 @@ define <4 x i32> @test_vp_splice_v4i32(<4 x i32> %va, <4 x i32> %vb, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i32> %v
 }
 
@@ -80,10 +74,8 @@ define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32>
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i32> %v
 }
 
@@ -109,10 +101,8 @@ define <8 x i16> @test_vp_splice_v8i16(<8 x i16> %va, <8 x i16> %vb, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
 
-  %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x i16> %v
 }
 
@@ -125,10 +115,8 @@ define <8 x i16> @test_vp_splice_v8i16_negative_offset(<8 x i16> %va, <8 x i16>
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
 
-  %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 -5, <8 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x i16> %v
 }
 
@@ -154,10 +142,8 @@ define <16 x i8> @test_vp_splice_v16i8(<16 x i8> %va, <16 x i8> %vb, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
 
-  %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <16 x i8> %v
 }
 
@@ -170,10 +156,8 @@ define <16 x i8> @test_vp_splice_v16i8_negative_offset(<16 x i8> %va, <16 x i8>
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
 
-  %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 -5, <16 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 -5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <16 x i8> %v
 }
 
@@ -199,10 +183,8 @@ define <2 x double> @test_vp_splice_v2f64(<2 x double> %va, <2 x double> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x double> %v
 }
 
@@ -215,10 +197,8 @@ define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x double> %v
 }
 
@@ -244,10 +224,8 @@ define <4 x float> @test_vp_splice_v4f32(<4 x float> %va, <4 x float> %vb, i32 z
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }
 
@@ -260,10 +238,8 @@ define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x f
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
index 9579973aee0d6a..ce0ae2022885ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
@@ -26,10 +26,8 @@ define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %ev
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i1> %v
 }
 
@@ -52,10 +50,8 @@ define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb,
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
 
-  %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 -5, <2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i1> %v
 }
 
@@ -103,10 +99,8 @@ define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %ev
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i1> %v
 }
 
@@ -129,10 +123,8 @@ define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb,
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
 
-  %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 -5, <4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i1> %v
 }
 
@@ -180,10 +172,8 @@ define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %ev
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
 
-  %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 5, <8 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x i1> %v
 }
 
@@ -206,10 +196,8 @@ define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb,
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
 
-  %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 -5, <8 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x i1> %v
 }
 
@@ -257,10 +245,8 @@ define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
 
-  %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 5, <16 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <16 x i1> %v
 }
 
@@ -283,10 +269,8 @@ define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1>
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
 
-  %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 -5, <16 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 -5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <16 x i1> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index 4eaadb3c24fbb9..668cff23429366 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -29,10 +29,8 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
 
-  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i1> %v
 }
 
@@ -55,10 +53,8 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
 
-  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i1> %v
 }
 
@@ -106,10 +102,8 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
 
-  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i1> %v
 }
 
@@ -132,10 +126,8 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
 
-  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i1> %v
 }
 
@@ -183,10 +175,8 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 
-  %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 4 x i1> %v
 }
 
@@ -209,10 +199,8 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 
-  %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 -5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 -5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 4 x i1> %v
 }
 
@@ -260,10 +248,8 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 8 x i1> %v
 }
 
@@ -286,10 +272,8 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 
-  %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 -5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 -5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 8 x i1> %v
 }
 
@@ -337,10 +321,8 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscal
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
 
-  %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 16 x i1> %v
 }
 
@@ -363,10 +345,8 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x
 ; CHECK-NEXT:    vslideup.vi v8, v10, 5
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
 
-  %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 -5, <vscale x 16 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 -5, <vscale x 16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 16 x i1> %v
 }
 
@@ -415,10 +395,8 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscal
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
 
-  %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 32 x i1> %v
 }
 
@@ -441,10 +419,8 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x
 ; CHECK-NEXT:    vslideup.vi v8, v12, 5
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
 
-  %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 -5, <vscale x 32 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 -5, <vscale x 32 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 32 x i1> %v
 }
 
@@ -493,10 +469,8 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscal
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
 
-  %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 64 x i1> %v
 }
 
@@ -519,10 +493,8 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x
 ; CHECK-NEXT:    vslideup.vi v8, v16, 5
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
 
-  %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 -5, <vscale x 64 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 -5, <vscale x 64 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 64 x i1> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
index 7d85370e390b00..a4f91c3e7c99e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
@@ -23,10 +23,7 @@ define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-
-  %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i64> %v
 }
 
@@ -39,10 +36,7 @@ define <vscale x 2 x i64> @test_vp_splice_nxv2i64_negative_offset(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-
-  %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i64> %v
 }
 
@@ -68,10 +62,7 @@ define <vscale x 1 x i64> @test_vp_splice_nxv1i64(<vscale x 1 x i64> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
-
-  %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i64> %v
 }
 
@@ -84,10 +75,7 @@ define <vscale x 1 x i64> @test_vp_splice_nxv1i64_negative_offset(<vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
-
-  %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i64> %v
 }
 
@@ -113,10 +101,7 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32(<vscale x 2 x i32> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-
-  %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i32> %v
 }
 
@@ -129,10 +114,7 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32_negative_offset(<vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-
-  %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i32> %v
 }
 
@@ -158,10 +140,7 @@ define <vscale x 4 x i16> @test_vp_splice_nxv4i16(<vscale x 4 x i16> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
-
-  %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 4 x i16> %v
 }
 
@@ -174,10 +153,7 @@ define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
-
-  %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 -5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 -5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 4 x i16> %v
 }
 
@@ -203,10 +179,7 @@ define <vscale x 8 x i8> @test_vp_splice_nxv8i8(<vscale x 8 x i8> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
-
-  %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 8 x i8> %v
 }
 
@@ -219,10 +192,7 @@ define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
-
-  %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 -5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 -5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 8 x i8> %v
 }
 
@@ -248,10 +218,7 @@ define <vscale x 1 x double> @test_vp_splice_nxv1f64(<vscale x 1 x double> %va,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
-
-  %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x double> %v
 }
 
@@ -264,10 +231,7 @@ define <vscale x 1 x double> @test_vp_splice_nxv1f64_negative_offset(<vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
-
-  %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x double> %v
 }
 
@@ -293,10 +257,7 @@ define <vscale x 2 x float> @test_vp_splice_nxv2f32(<vscale x 2 x float> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-
-  %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x float> %v
 }
 
@@ -309,10 +270,7 @@ define <vscale x 2 x float> @test_vp_splice_nxv2f32_negative_offset(<vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
-  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-
-  %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
+  %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x float> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index a5c305d5ac8223..c86fee6305931f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -204,9 +204,7 @@ define <vscale x 4 x i8> @vpgather_truemask_nxv4i8(<vscale x 4 x ptr> %ptrs, i32
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv1r.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -500,9 +498,7 @@ define <vscale x 4 x i16> @vpgather_truemask_nxv4i16(<vscale x 4 x ptr> %ptrs, i
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -729,9 +725,7 @@ define <vscale x 4 x i32> @vpgather_truemask_nxv4i32(<vscale x 4 x ptr> %ptrs, i
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -988,9 +982,7 @@ define <vscale x 4 x i64> @vpgather_truemask_nxv4i64(<vscale x 4 x ptr> %ptrs, i
 ; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; RV64-NEXT:    vluxei64.v v8, (zero), v8
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1319,9 +1311,7 @@ define <vscale x 4 x half> @vpgather_truemask_nxv4f16(<vscale x 4 x ptr> %ptrs,
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  %v = call <vscale x 4 x half> @llvm.vp.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -1506,9 +1496,7 @@ define <vscale x 4 x float> @vpgather_truemask_nxv4f32(<vscale x 4 x ptr> %ptrs,
 ; RV64-NEXT:    vluxei64.v v12, (zero), v8
 ; RV64-NEXT:    vmv.v.v v8, v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  %v = call <vscale x 4 x float> @llvm.vp.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -1765,9 +1753,7 @@ define <vscale x 4 x double> @vpgather_truemask_nxv4f64(<vscale x 4 x ptr> %ptrs
 ; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; RV64-NEXT:    vluxei64.v v8, (zero), v8
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  %v = call <vscale x 4 x double> @llvm.vp.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index c203fcb903e56c..f07c16476c56a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -22,9 +22,7 @@ define <vscale x 1 x i8> @vpload_nxv1i8_allones_mask(ptr %ptr, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %load
 }
 
@@ -82,9 +80,7 @@ define <vscale x 8 x i8> @vpload_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %b, i32 %evl)
+  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %load
 }
 
@@ -118,9 +114,7 @@ define <vscale x 2 x i16> @vpload_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %b, i32 %evl)
+  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %load
 }
 
@@ -190,9 +184,7 @@ define <vscale x 4 x i32> @vpload_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %b, i32 %evl)
+  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %load
 }
 
@@ -226,9 +218,7 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %load
 }
 
@@ -298,9 +288,7 @@ define <vscale x 2 x half> @vpload_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %b, i32 %evl)
+  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x half> %load
 }
 
@@ -382,9 +370,7 @@ define <vscale x 8 x float> @vpload_nxv8f32_allones_mask(ptr %ptr, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %b, i32 %evl)
+  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x float> %load
 }
 
@@ -430,9 +416,7 @@ define <vscale x 4 x double> @vpload_nxv4f64_allones_mask(ptr %ptr, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %b, i32 %evl)
+  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x double> %load
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index 4f67aac2d2d2ba..76efdda15bf77b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -66,9 +66,7 @@ define <vscale x 1 x i8> @vpmerge_vi_nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> splat (i8 2), <vscale x 1 x i8> %vb, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -103,9 +101,7 @@ define <vscale x 2 x i8> @vpmerge_vi_nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> splat (i8 2), <vscale x 2 x i8> %vb, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -140,9 +136,7 @@ define <vscale x 3 x i8> @vpmerge_vi_nxv3i8(<vscale x 3 x i8> %vb, <vscale x 3 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.merge.nxv3i8(<vscale x 3 x i1> %m, <vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.merge.nxv3i8(<vscale x 3 x i1> %m, <vscale x 3 x i8> splat (i8 2), <vscale x 3 x i8> %vb, i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -177,9 +171,7 @@ define <vscale x 4 x i8> @vpmerge_vi_nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> splat (i8 2), <vscale x 4 x i8> %vb, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -214,9 +206,7 @@ define <vscale x 8 x i7> @vpmerge_vi_nxv8i7(<vscale x 8 x i7> %vb, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i7> poison, i7 2, i32 0
-  %va = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i7> @llvm.vp.merge.nxv8i7(<vscale x 8 x i1> %m, <vscale x 8 x i7> %va, <vscale x 8 x i7> %vb, i32 %evl)
+  %v = call <vscale x 8 x i7> @llvm.vp.merge.nxv8i7(<vscale x 8 x i1> %m, <vscale x 8 x i7> splat (i7 2), <vscale x 8 x i7> %vb, i32 %evl)
   ret <vscale x 8 x i7> %v
 }
 
@@ -251,9 +241,7 @@ define <vscale x 8 x i8> @vpmerge_vi_nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> splat (i8 2), <vscale x 8 x i8> %vb, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -288,9 +276,7 @@ define <vscale x 16 x i8> @vpmerge_vi_nxv16i8(<vscale x 16 x i8> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> splat (i8 2), <vscale x 16 x i8> %vb, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -325,9 +311,7 @@ define <vscale x 32 x i8> @vpmerge_vi_nxv32i8(<vscale x 32 x i8> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> splat (i8 2), <vscale x 32 x i8> %vb, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -362,9 +346,7 @@ define <vscale x 64 x i8> @vpmerge_vi_nxv64i8(<vscale x 64 x i8> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> splat (i8 2), <vscale x 64 x i8> %vb, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -464,9 +446,7 @@ define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 2, i32 0
-  %va = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.merge.nxv128i8(<vscale x 128 x i1> %m, <vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.merge.nxv128i8(<vscale x 128 x i1> %m, <vscale x 128 x i8> splat (i8 2), <vscale x 128 x i8> %vb, i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -501,9 +481,7 @@ define <vscale x 1 x i16> @vpmerge_vi_nxv1i16(<vscale x 1 x i16> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 2, i32 0
-  %va = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> splat (i16 2), <vscale x 1 x i16> %vb, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -538,9 +516,7 @@ define <vscale x 2 x i16> @vpmerge_vi_nxv2i16(<vscale x 2 x i16> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 2, i32 0
-  %va = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> splat (i16 2), <vscale x 2 x i16> %vb, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -575,9 +551,7 @@ define <vscale x 4 x i16> @vpmerge_vi_nxv4i16(<vscale x 4 x i16> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 2, i32 0
-  %va = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> splat (i16 2), <vscale x 4 x i16> %vb, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -612,9 +586,7 @@ define <vscale x 8 x i16> @vpmerge_vi_nxv8i16(<vscale x 8 x i16> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %va = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> splat (i16 2), <vscale x 8 x i16> %vb, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -649,9 +621,7 @@ define <vscale x 16 x i16> @vpmerge_vi_nxv16i16(<vscale x 16 x i16> %vb, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 2, i32 0
-  %va = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> splat (i16 2), <vscale x 16 x i16> %vb, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -686,9 +656,7 @@ define <vscale x 32 x i16> @vpmerge_vi_nxv32i16(<vscale x 32 x i16> %vb, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 2, i32 0
-  %va = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> splat (i16 2), <vscale x 32 x i16> %vb, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -723,9 +691,7 @@ define <vscale x 1 x i32> @vpmerge_vi_nxv1i32(<vscale x 1 x i32> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 2, i32 0
-  %va = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> splat (i32 2), <vscale x 1 x i32> %vb, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -760,9 +726,7 @@ define <vscale x 2 x i32> @vpmerge_vi_nxv2i32(<vscale x 2 x i32> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 2, i32 0
-  %va = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> splat (i32 2), <vscale x 2 x i32> %vb, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -797,9 +761,7 @@ define <vscale x 4 x i32> @vpmerge_vi_nxv4i32(<vscale x 4 x i32> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %va = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> splat (i32 2), <vscale x 4 x i32> %vb, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -834,9 +796,7 @@ define <vscale x 8 x i32> @vpmerge_vi_nxv8i32(<vscale x 8 x i32> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 2, i32 0
-  %va = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> splat (i32 2), <vscale x 8 x i32> %vb, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -871,9 +831,7 @@ define <vscale x 16 x i32> @vpmerge_vi_nxv16i32(<vscale x 16 x i32> %vb, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 2, i32 0
-  %va = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> splat (i32 2), <vscale x 16 x i32> %vb, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -920,9 +878,7 @@ define <vscale x 1 x i64> @vpmerge_vi_nxv1i64(<vscale x 1 x i64> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 2, i32 0
-  %va = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> splat (i64 2), <vscale x 1 x i64> %vb, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -969,9 +925,7 @@ define <vscale x 2 x i64> @vpmerge_vi_nxv2i64(<vscale x 2 x i64> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
-  %va = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> splat (i64 2), <vscale x 2 x i64> %vb, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1018,9 +972,7 @@ define <vscale x 4 x i64> @vpmerge_vi_nxv4i64(<vscale x 4 x i64> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 2, i32 0
-  %va = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> splat (i64 2), <vscale x 4 x i64> %vb, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1067,9 +1019,7 @@ define <vscale x 8 x i64> @vpmerge_vi_nxv8i64(<vscale x 8 x i64> %vb, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %va = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> splat (i64 2), <vscale x 8 x i64> %vb, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index a907e149b167fb..38e4aab4deb34a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -145,9 +145,7 @@ define void @vpscatter_truemask_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x ptr
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -302,9 +300,7 @@ define void @vpscatter_truemask_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x p
 ; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -604,9 +600,7 @@ define void @vpscatter_truemask_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x p
 ; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -858,9 +852,7 @@ define void @vpscatter_truemask_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x p
 ; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -1180,9 +1172,7 @@ define void @vpscatter_truemask_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x
 ; RV64-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -1361,9 +1351,7 @@ define void @vpscatter_truemask_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x
 ; RV64-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
@@ -1615,9 +1603,7 @@ define void @vpscatter_truemask_nxv4f64(<vscale x 4 x double> %val, <vscale x 4
 ; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; RV64-NEXT:    vsoxei64.v v8, (zero), v12
 ; RV64-NEXT:    ret
-  %mhead = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  call void @llvm.vp.scatter.nxv4f64.nxv4p0(<vscale x 4 x double> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mtrue, i32 %evl)
+  call void @llvm.vp.scatter.nxv4f64.nxv4p0(<vscale x 4 x double> %val, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> splat (i1 1), i32 %evl)
   ret void
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index 8b27a61e243db1..c12fc0497742ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -358,9 +358,7 @@ define void @vpstore_nxv1i8_allones_mask(<vscale x 1 x i8> %val, ptr %ptr, i32 z
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  call void @llvm.vp.store.nxv1i8.p0(<vscale x 1 x i8> %val, ptr %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  call void @llvm.vp.store.nxv1i8.p0(<vscale x 1 x i8> %val, ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret void
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
index 1eafb3bdfed2ce..3a6ae5fdb21073 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
@@ -39,9 +39,7 @@ define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = srem <vscale x 1 x i8> %va, %splat
+  %vc = srem <vscale x 1 x i8> %va, splat (i8 -7)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -93,9 +91,7 @@ define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = srem <vscale x 2 x i8> %va, %splat
+  %vc = srem <vscale x 2 x i8> %va, splat (i8 -7)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -147,9 +143,7 @@ define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = srem <vscale x 4 x i8> %va, %splat
+  %vc = srem <vscale x 4 x i8> %va, splat (i8 -7)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -201,9 +195,7 @@ define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = srem <vscale x 8 x i8> %va, %splat
+  %vc = srem <vscale x 8 x i8> %va, splat (i8 -7)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -255,9 +247,7 @@ define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = srem <vscale x 16 x i8> %va, %splat
+  %vc = srem <vscale x 16 x i8> %va, splat (i8 -7)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -309,9 +299,7 @@ define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = srem <vscale x 32 x i8> %va, %splat
+  %vc = srem <vscale x 32 x i8> %va, splat (i8 -7)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -363,9 +351,7 @@ define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = srem <vscale x 64 x i8> %va, %splat
+  %vc = srem <vscale x 64 x i8> %va, splat (i8 -7)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -404,9 +390,7 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = srem <vscale x 1 x i16> %va, %splat
+  %vc = srem <vscale x 1 x i16> %va, splat (i16 -7)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -458,9 +442,7 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = srem <vscale x 2 x i16> %va, %splat
+  %vc = srem <vscale x 2 x i16> %va, splat (i16 -7)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -512,9 +494,7 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = srem <vscale x 4 x i16> %va, %splat
+  %vc = srem <vscale x 4 x i16> %va, splat (i16 -7)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -566,9 +546,7 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = srem <vscale x 8 x i16> %va, %splat
+  %vc = srem <vscale x 8 x i16> %va, splat (i16 -7)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -620,9 +598,7 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = srem <vscale x 16 x i16> %va, %splat
+  %vc = srem <vscale x 16 x i16> %va, splat (i16 -7)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -674,9 +650,7 @@ define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = srem <vscale x 32 x i16> %va, %splat
+  %vc = srem <vscale x 32 x i16> %va, splat (i16 -7)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -730,9 +704,7 @@ define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = srem <vscale x 1 x i32> %va, %splat
+  %vc = srem <vscale x 1 x i32> %va, splat (i32 -7)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -786,9 +758,7 @@ define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = srem <vscale x 2 x i32> %va, %splat
+  %vc = srem <vscale x 2 x i32> %va, splat (i32 -7)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -842,9 +812,7 @@ define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = srem <vscale x 4 x i32> %va, %splat
+  %vc = srem <vscale x 4 x i32> %va, splat (i32 -7)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -898,9 +866,7 @@ define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = srem <vscale x 8 x i32> %va, %splat
+  %vc = srem <vscale x 8 x i32> %va, splat (i32 -7)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -954,9 +920,7 @@ define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = srem <vscale x 16 x i32> %va, %splat
+  %vc = srem <vscale x 16 x i32> %va, splat (i32 -7)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -1039,9 +1003,7 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV64-V-NEXT:    li a0, -7
 ; RV64-V-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = srem <vscale x 1 x i64> %va, %splat
+  %vc = srem <vscale x 1 x i64> %va, splat (i64 -7)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -1124,9 +1086,7 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV64-V-NEXT:    li a0, -7
 ; RV64-V-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = srem <vscale x 2 x i64> %va, %splat
+  %vc = srem <vscale x 2 x i64> %va, splat (i64 -7)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1209,9 +1169,7 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV64-V-NEXT:    li a0, -7
 ; RV64-V-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = srem <vscale x 4 x i64> %va, %splat
+  %vc = srem <vscale x 4 x i64> %va, splat (i64 -7)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1294,8 +1252,6 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV64-V-NEXT:    li a0, -7
 ; RV64-V-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = srem <vscale x 8 x i64> %va, %splat
+  %vc = srem <vscale x 8 x i64> %va, splat (i64 -7)
   ret <vscale x 8 x i64> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
index 74a8fce1fcd7f2..cf85fd827b51f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -39,9 +39,7 @@ define <vscale x 1 x i8> @vrem_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -65,9 +63,7 @@ define <vscale x 1 x i8> @vrem_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -89,9 +85,7 @@ define <vscale x 2 x i8> @vrem_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -115,9 +109,7 @@ define <vscale x 2 x i8> @vrem_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -151,9 +143,7 @@ define <vscale x 4 x i8> @vrem_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -177,9 +167,7 @@ define <vscale x 4 x i8> @vrem_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -201,9 +189,7 @@ define <vscale x 8 x i8> @vrem_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -227,9 +213,7 @@ define <vscale x 8 x i8> @vrem_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -251,9 +235,7 @@ define <vscale x 16 x i8> @vrem_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -277,9 +259,7 @@ define <vscale x 16 x i8> @vrem_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -301,9 +281,7 @@ define <vscale x 32 x i8> @vrem_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -327,9 +305,7 @@ define <vscale x 32 x i8> @vrem_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -351,9 +327,7 @@ define <vscale x 64 x i8> @vrem_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -377,9 +351,7 @@ define <vscale x 64 x i8> @vrem_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -401,9 +373,7 @@ define <vscale x 1 x i16> @vrem_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -427,9 +397,7 @@ define <vscale x 1 x i16> @vrem_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -451,9 +419,7 @@ define <vscale x 2 x i16> @vrem_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -477,9 +443,7 @@ define <vscale x 2 x i16> @vrem_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -501,9 +465,7 @@ define <vscale x 4 x i16> @vrem_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -527,9 +489,7 @@ define <vscale x 4 x i16> @vrem_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -551,9 +511,7 @@ define <vscale x 8 x i16> @vrem_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -577,9 +535,7 @@ define <vscale x 8 x i16> @vrem_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -601,9 +557,7 @@ define <vscale x 16 x i16> @vrem_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -627,9 +581,7 @@ define <vscale x 16 x i16> @vrem_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -651,9 +603,7 @@ define <vscale x 32 x i16> @vrem_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -677,9 +627,7 @@ define <vscale x 32 x i16> @vrem_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -701,9 +649,7 @@ define <vscale x 1 x i32> @vrem_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -727,9 +673,7 @@ define <vscale x 1 x i32> @vrem_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -751,9 +695,7 @@ define <vscale x 2 x i32> @vrem_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -777,9 +719,7 @@ define <vscale x 2 x i32> @vrem_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -801,9 +741,7 @@ define <vscale x 4 x i32> @vrem_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -827,9 +765,7 @@ define <vscale x 4 x i32> @vrem_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -851,9 +787,7 @@ define <vscale x 8 x i32> @vrem_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -877,9 +811,7 @@ define <vscale x 8 x i32> @vrem_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -901,9 +833,7 @@ define <vscale x 16 x i32> @vrem_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -927,9 +857,7 @@ define <vscale x 16 x i32> @vrem_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -951,9 +879,7 @@ define <vscale x 1 x i64> @vrem_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1005,9 +931,7 @@ define <vscale x 1 x i64> @vrem_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1029,9 +953,7 @@ define <vscale x 2 x i64> @vrem_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1083,9 +1005,7 @@ define <vscale x 2 x i64> @vrem_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1107,9 +1027,7 @@ define <vscale x 4 x i64> @vrem_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1161,9 +1079,7 @@ define <vscale x 4 x i64> @vrem_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1185,9 +1101,7 @@ define <vscale x 8 x i64> @vrem_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1239,8 +1153,6 @@ define <vscale x 8 x i64> @vrem_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
index 428d071cac399e..ed40f5af4fa4ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
@@ -36,9 +36,7 @@ define <vscale x 1 x i8> @vremu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = urem <vscale x 1 x i8> %va, %splat
+  %vc = urem <vscale x 1 x i8> %va, splat (i8 -7)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -74,9 +72,7 @@ define <vscale x 2 x i8> @vremu_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = urem <vscale x 2 x i8> %va, %splat
+  %vc = urem <vscale x 2 x i8> %va, splat (i8 -7)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -112,9 +108,7 @@ define <vscale x 4 x i8> @vremu_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = urem <vscale x 4 x i8> %va, %splat
+  %vc = urem <vscale x 4 x i8> %va, splat (i8 -7)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -150,9 +144,7 @@ define <vscale x 8 x i8> @vremu_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = urem <vscale x 8 x i8> %va, %splat
+  %vc = urem <vscale x 8 x i8> %va, splat (i8 -7)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -188,9 +180,7 @@ define <vscale x 16 x i8> @vremu_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = urem <vscale x 16 x i8> %va, %splat
+  %vc = urem <vscale x 16 x i8> %va, splat (i8 -7)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -226,9 +216,7 @@ define <vscale x 32 x i8> @vremu_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = urem <vscale x 32 x i8> %va, %splat
+  %vc = urem <vscale x 32 x i8> %va, splat (i8 -7)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -264,9 +252,7 @@ define <vscale x 64 x i8> @vremu_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -7, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = urem <vscale x 64 x i8> %va, %splat
+  %vc = urem <vscale x 64 x i8> %va, splat (i8 -7)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -303,9 +289,7 @@ define <vscale x 1 x i16> @vremu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = urem <vscale x 1 x i16> %va, %splat
+  %vc = urem <vscale x 1 x i16> %va, splat (i16 -7)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -342,9 +326,7 @@ define <vscale x 2 x i16> @vremu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = urem <vscale x 2 x i16> %va, %splat
+  %vc = urem <vscale x 2 x i16> %va, splat (i16 -7)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -381,9 +363,7 @@ define <vscale x 4 x i16> @vremu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = urem <vscale x 4 x i16> %va, %splat
+  %vc = urem <vscale x 4 x i16> %va, splat (i16 -7)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -420,9 +400,7 @@ define <vscale x 8 x i16> @vremu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = urem <vscale x 8 x i16> %va, %splat
+  %vc = urem <vscale x 8 x i16> %va, splat (i16 -7)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -459,9 +437,7 @@ define <vscale x 16 x i16> @vremu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = urem <vscale x 16 x i16> %va, %splat
+  %vc = urem <vscale x 16 x i16> %va, splat (i16 -7)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -498,9 +474,7 @@ define <vscale x 32 x i16> @vremu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -7, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = urem <vscale x 32 x i16> %va, %splat
+  %vc = urem <vscale x 32 x i16> %va, splat (i16 -7)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -537,9 +511,7 @@ define <vscale x 1 x i32> @vremu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = urem <vscale x 1 x i32> %va, %splat
+  %vc = urem <vscale x 1 x i32> %va, splat (i32 -7)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -576,9 +548,7 @@ define <vscale x 2 x i32> @vremu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = urem <vscale x 2 x i32> %va, %splat
+  %vc = urem <vscale x 2 x i32> %va, splat (i32 -7)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -615,9 +585,7 @@ define <vscale x 4 x i32> @vremu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = urem <vscale x 4 x i32> %va, %splat
+  %vc = urem <vscale x 4 x i32> %va, splat (i32 -7)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -654,9 +622,7 @@ define <vscale x 8 x i32> @vremu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = urem <vscale x 8 x i32> %va, %splat
+  %vc = urem <vscale x 8 x i32> %va, splat (i32 -7)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -693,9 +659,7 @@ define <vscale x 16 x i32> @vremu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -7, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = urem <vscale x 16 x i32> %va, %splat
+  %vc = urem <vscale x 16 x i32> %va, splat (i32 -7)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -773,9 +737,7 @@ define <vscale x 1 x i64> @vremu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV64-V-NEXT:    li a0, -7
 ; RV64-V-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = urem <vscale x 1 x i64> %va, %splat
+  %vc = urem <vscale x 1 x i64> %va, splat (i64 -7)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -786,9 +748,7 @@ define <vscale x 1 x i64> @vremu_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = urem <vscale x 1 x i64> %va, %splat
+  %vc = urem <vscale x 1 x i64> %va, splat (i64 16)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -803,9 +763,7 @@ define <vscale x 1 x i64> @vremu_vi_nxv1i64_2(<vscale x 1 x i64> %va, <vscale x
 ; CHECK-NEXT:    vadd.vi v9, v9, -1
 ; CHECK-NEXT:    vand.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = shl <vscale x 1 x i64> %splat, %vb
+  %vc = shl <vscale x 1 x i64> splat (i64 16), %vb
   %vd = urem <vscale x 1 x i64> %va, %vc
   ret <vscale x 1 x i64> %vd
 }
@@ -884,9 +842,7 @@ define <vscale x 2 x i64> @vremu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV64-V-NEXT:    li a0, -7
 ; RV64-V-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = urem <vscale x 2 x i64> %va, %splat
+  %vc = urem <vscale x 2 x i64> %va, splat (i64 -7)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -897,9 +853,7 @@ define <vscale x 2 x i64> @vremu_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = urem <vscale x 2 x i64> %va, %splat
+  %vc = urem <vscale x 2 x i64> %va, splat (i64 16)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -914,9 +868,7 @@ define <vscale x 2 x i64> @vremu_vi_nxv2i64_2(<vscale x 2 x i64> %va, <vscale x
 ; CHECK-NEXT:    vadd.vi v10, v10, -1
 ; CHECK-NEXT:    vand.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = shl <vscale x 2 x i64> %splat, %vb
+  %vc = shl <vscale x 2 x i64> splat (i64 16), %vb
   %vd = urem <vscale x 2 x i64> %va, %vc
   ret <vscale x 2 x i64> %vd
 }
@@ -995,9 +947,7 @@ define <vscale x 4 x i64> @vremu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV64-V-NEXT:    li a0, -7
 ; RV64-V-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = urem <vscale x 4 x i64> %va, %splat
+  %vc = urem <vscale x 4 x i64> %va, splat (i64 -7)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1008,9 +958,7 @@ define <vscale x 4 x i64> @vremu_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = urem <vscale x 4 x i64> %va, %splat
+  %vc = urem <vscale x 4 x i64> %va, splat (i64 16)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1025,9 +973,7 @@ define <vscale x 4 x i64> @vremu_vi_nxv4i64_2(<vscale x 4 x i64> %va, <vscale x
 ; CHECK-NEXT:    vadd.vi v12, v12, -1
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = shl <vscale x 4 x i64> %splat, %vb
+  %vc = shl <vscale x 4 x i64> splat (i64 16), %vb
   %vd = urem <vscale x 4 x i64> %va, %vc
   ret <vscale x 4 x i64> %vd
 }
@@ -1106,9 +1052,7 @@ define <vscale x 8 x i64> @vremu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV64-V-NEXT:    li a0, -7
 ; RV64-V-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-V-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -7, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = urem <vscale x 8 x i64> %va, %splat
+  %vc = urem <vscale x 8 x i64> %va, splat (i64 -7)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1119,9 +1063,7 @@ define <vscale x 8 x i64> @vremu_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = urem <vscale x 8 x i64> %va, %splat
+  %vc = urem <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1136,9 +1078,7 @@ define <vscale x 8 x i64> @vremu_vi_nxv8i64_2(<vscale x 8 x i64> %va, <vscale x
 ; CHECK-NEXT:    vadd.vi v16, v16, -1
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = shl <vscale x 8 x i64> %splat, %vb
+  %vc = shl <vscale x 8 x i64> splat (i64 16), %vb
   %vd = urem <vscale x 8 x i64> %va, %vc
   ret <vscale x 8 x i64> %vd
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
index 1be66bd74f8e1d..61bdd5b8d3c8a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
@@ -41,9 +41,7 @@ define <vscale x 1 x i8> @vremu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -67,9 +65,7 @@ define <vscale x 1 x i8> @vremu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -91,9 +87,7 @@ define <vscale x 2 x i8> @vremu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -117,9 +111,7 @@ define <vscale x 2 x i8> @vremu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -153,9 +145,7 @@ define <vscale x 4 x i8> @vremu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -179,9 +169,7 @@ define <vscale x 4 x i8> @vremu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -203,9 +191,7 @@ define <vscale x 8 x i8> @vremu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -229,9 +215,7 @@ define <vscale x 8 x i8> @vremu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -253,9 +237,7 @@ define <vscale x 16 x i8> @vremu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -279,9 +261,7 @@ define <vscale x 16 x i8> @vremu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -303,9 +283,7 @@ define <vscale x 32 x i8> @vremu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -329,9 +307,7 @@ define <vscale x 32 x i8> @vremu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -353,9 +329,7 @@ define <vscale x 64 x i8> @vremu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -379,9 +353,7 @@ define <vscale x 64 x i8> @vremu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -403,9 +375,7 @@ define <vscale x 1 x i16> @vremu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -429,9 +399,7 @@ define <vscale x 1 x i16> @vremu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -453,9 +421,7 @@ define <vscale x 2 x i16> @vremu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -479,9 +445,7 @@ define <vscale x 2 x i16> @vremu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -503,9 +467,7 @@ define <vscale x 4 x i16> @vremu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -529,9 +491,7 @@ define <vscale x 4 x i16> @vremu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -553,9 +513,7 @@ define <vscale x 8 x i16> @vremu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -579,9 +537,7 @@ define <vscale x 8 x i16> @vremu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -603,9 +559,7 @@ define <vscale x 16 x i16> @vremu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -629,9 +583,7 @@ define <vscale x 16 x i16> @vremu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -653,9 +605,7 @@ define <vscale x 32 x i16> @vremu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -679,9 +629,7 @@ define <vscale x 32 x i16> @vremu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -703,9 +651,7 @@ define <vscale x 1 x i32> @vremu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -729,9 +675,7 @@ define <vscale x 1 x i32> @vremu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -753,9 +697,7 @@ define <vscale x 2 x i32> @vremu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -779,9 +721,7 @@ define <vscale x 2 x i32> @vremu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -803,9 +743,7 @@ define <vscale x 4 x i32> @vremu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -829,9 +767,7 @@ define <vscale x 4 x i32> @vremu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -853,9 +789,7 @@ define <vscale x 8 x i32> @vremu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -879,9 +813,7 @@ define <vscale x 8 x i32> @vremu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -903,9 +835,7 @@ define <vscale x 16 x i32> @vremu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -929,9 +859,7 @@ define <vscale x 16 x i32> @vremu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -953,9 +881,7 @@ define <vscale x 1 x i64> @vremu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1007,9 +933,7 @@ define <vscale x 1 x i64> @vremu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1031,9 +955,7 @@ define <vscale x 2 x i64> @vremu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1085,9 +1007,7 @@ define <vscale x 2 x i64> @vremu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1109,9 +1029,7 @@ define <vscale x 4 x i64> @vremu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1163,9 +1081,7 @@ define <vscale x 4 x i64> @vremu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1187,9 +1103,7 @@ define <vscale x 8 x i64> @vremu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1241,8 +1155,6 @@ define <vscale x 8 x i64> @vremu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll
index 127d3ffd793154..e97b1f41ad3d36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll
@@ -20,9 +20,7 @@ define <vscale x 1 x i8> @vrsub_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -4, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i8> %splat, %va
+  %vc = sub <vscale x 1 x i8> splat (i8 -4), %va
   ret <vscale x 1 x i8> %vc
 }
 
@@ -44,9 +42,7 @@ define <vscale x 2 x i8> @vrsub_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -4, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sub <vscale x 2 x i8> %splat, %va
+  %vc = sub <vscale x 2 x i8> splat (i8 -4), %va
   ret <vscale x 2 x i8> %vc
 }
 
@@ -68,9 +64,7 @@ define <vscale x 4 x i8> @vrsub_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -4, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sub <vscale x 4 x i8> %splat, %va
+  %vc = sub <vscale x 4 x i8> splat (i8 -4), %va
   ret <vscale x 4 x i8> %vc
 }
 
@@ -92,9 +86,7 @@ define <vscale x 8 x i8> @vrsub_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -4, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sub <vscale x 8 x i8> %splat, %va
+  %vc = sub <vscale x 8 x i8> splat (i8 -4), %va
   ret <vscale x 8 x i8> %vc
 }
 
@@ -116,9 +108,7 @@ define <vscale x 16 x i8> @vrsub_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -4, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sub <vscale x 16 x i8> %splat, %va
+  %vc = sub <vscale x 16 x i8> splat (i8 -4), %va
   ret <vscale x 16 x i8> %vc
 }
 
@@ -140,9 +130,7 @@ define <vscale x 32 x i8> @vrsub_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -4, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = sub <vscale x 32 x i8> %splat, %va
+  %vc = sub <vscale x 32 x i8> splat (i8 -4), %va
   ret <vscale x 32 x i8> %vc
 }
 
@@ -164,9 +152,7 @@ define <vscale x 64 x i8> @vrsub_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -4, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = sub <vscale x 64 x i8> %splat, %va
+  %vc = sub <vscale x 64 x i8> splat (i8 -4), %va
   ret <vscale x 64 x i8> %vc
 }
 
@@ -188,9 +174,7 @@ define <vscale x 1 x i16> @vrsub_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -4, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i16> %splat, %va
+  %vc = sub <vscale x 1 x i16> splat (i16 -4), %va
   ret <vscale x 1 x i16> %vc
 }
 
@@ -212,9 +196,7 @@ define <vscale x 2 x i16> @vrsub_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -4, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sub <vscale x 2 x i16> %splat, %va
+  %vc = sub <vscale x 2 x i16> splat (i16 -4), %va
   ret <vscale x 2 x i16> %vc
 }
 
@@ -236,9 +218,7 @@ define <vscale x 4 x i16> @vrsub_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -4, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sub <vscale x 4 x i16> %splat, %va
+  %vc = sub <vscale x 4 x i16> splat (i16 -4), %va
   ret <vscale x 4 x i16> %vc
 }
 
@@ -260,9 +240,7 @@ define <vscale x 8 x i16> @vrsub_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -4, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sub <vscale x 8 x i16> %splat, %va
+  %vc = sub <vscale x 8 x i16> splat (i16 -4), %va
   ret <vscale x 8 x i16> %vc
 }
 
@@ -284,9 +262,7 @@ define <vscale x 16 x i16> @vrsub_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -4, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sub <vscale x 16 x i16> %splat, %va
+  %vc = sub <vscale x 16 x i16> splat (i16 -4), %va
   ret <vscale x 16 x i16> %vc
 }
 
@@ -308,9 +284,7 @@ define <vscale x 32 x i16> @vrsub_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -4, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = sub <vscale x 32 x i16> %splat, %va
+  %vc = sub <vscale x 32 x i16> splat (i16 -4), %va
   ret <vscale x 32 x i16> %vc
 }
 
@@ -332,9 +306,7 @@ define <vscale x 1 x i32> @vrsub_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -4, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i32> %splat, %va
+  %vc = sub <vscale x 1 x i32> splat (i32 -4), %va
   ret <vscale x 1 x i32> %vc
 }
 
@@ -356,9 +328,7 @@ define <vscale x 2 x i32> @vrsub_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -4, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sub <vscale x 2 x i32> %splat, %va
+  %vc = sub <vscale x 2 x i32> splat (i32 -4), %va
   ret <vscale x 2 x i32> %vc
 }
 
@@ -380,9 +350,7 @@ define <vscale x 4 x i32> @vrsub_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -4, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sub <vscale x 4 x i32> %splat, %va
+  %vc = sub <vscale x 4 x i32> splat (i32 -4), %va
   ret <vscale x 4 x i32> %vc
 }
 
@@ -404,9 +372,7 @@ define <vscale x 8 x i32> @vrsub_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -4, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sub <vscale x 8 x i32> %splat, %va
+  %vc = sub <vscale x 8 x i32> splat (i32 -4), %va
   ret <vscale x 8 x i32> %vc
 }
 
@@ -428,9 +394,7 @@ define <vscale x 16 x i32> @vrsub_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -4, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sub <vscale x 16 x i32> %splat, %va
+  %vc = sub <vscale x 16 x i32> splat (i32 -4), %va
   ret <vscale x 16 x i32> %vc
 }
 
@@ -465,9 +429,7 @@ define <vscale x 1 x i64> @vrsub_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -4, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i64> %splat, %va
+  %vc = sub <vscale x 1 x i64> splat (i64 -4), %va
   ret <vscale x 1 x i64> %vc
 }
 
@@ -502,9 +464,7 @@ define <vscale x 2 x i64> @vrsub_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -4, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sub <vscale x 2 x i64> %splat, %va
+  %vc = sub <vscale x 2 x i64> splat (i64 -4), %va
   ret <vscale x 2 x i64> %vc
 }
 
@@ -539,9 +499,7 @@ define <vscale x 4 x i64> @vrsub_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -4, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sub <vscale x 4 x i64> %splat, %va
+  %vc = sub <vscale x 4 x i64> splat (i64 -4), %va
   ret <vscale x 4 x i64> %vc
 }
 
@@ -576,8 +534,6 @@ define <vscale x 8 x i64> @vrsub_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, -4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -4, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sub <vscale x 8 x i64> %splat, %va
+  %vc = sub <vscale x 8 x i64> splat (i64 -4), %va
   ret <vscale x 8 x i64> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
index a6eb4de03e1ccd..be372c9aa54d5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
@@ -26,9 +26,7 @@ define <vscale x 1 x i8> @vrsub_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -38,9 +36,7 @@ define <vscale x 1 x i8> @vrsub_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> splat (i8 2), <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -50,11 +46,7 @@ define <vscale x 1 x i8> @vrsub_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> splat (i8 2), <vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -80,9 +72,7 @@ define <vscale x 2 x i8> @vrsub_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -92,9 +82,7 @@ define <vscale x 2 x i8> @vrsub_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> splat (i8 2), <vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -104,11 +92,7 @@ define <vscale x 2 x i8> @vrsub_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> splat (i8 2), <vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -134,9 +118,7 @@ define <vscale x 4 x i8> @vrsub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -146,9 +128,7 @@ define <vscale x 4 x i8> @vrsub_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> splat (i8 2), <vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -158,11 +138,7 @@ define <vscale x 4 x i8> @vrsub_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> splat (i8 2), <vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -188,9 +164,7 @@ define <vscale x 8 x i8> @vrsub_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -200,9 +174,7 @@ define <vscale x 8 x i8> @vrsub_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> splat (i8 2), <vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -212,11 +184,7 @@ define <vscale x 8 x i8> @vrsub_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> splat (i8 2), <vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -242,9 +210,7 @@ define <vscale x 16 x i8> @vrsub_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %vb, <vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %vb, <vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -254,9 +220,7 @@ define <vscale x 16 x i8> @vrsub_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %vb, <vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -266,11 +230,7 @@ define <vscale x 16 x i8> @vrsub_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %vb, <vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -296,9 +256,7 @@ define <vscale x 32 x i8> @vrsub_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %vb, <vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %vb, <vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -308,9 +266,7 @@ define <vscale x 32 x i8> @vrsub_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %vb, <vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> splat (i8 2), <vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -320,11 +276,7 @@ define <vscale x 32 x i8> @vrsub_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %vb, <vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> splat (i8 2), <vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -350,9 +302,7 @@ define <vscale x 64 x i8> @vrsub_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %vb, <vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %vb, <vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -362,9 +312,7 @@ define <vscale x 64 x i8> @vrsub_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %vb, <vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> splat (i8 2), <vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -374,11 +322,7 @@ define <vscale x 64 x i8> @vrsub_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %vb, <vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> splat (i8 2), <vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -404,9 +348,7 @@ define <vscale x 1 x i16> @vrsub_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -416,9 +358,7 @@ define <vscale x 1 x i16> @vrsub_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> splat (i16 2), <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -428,11 +368,7 @@ define <vscale x 1 x i16> @vrsub_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> splat (i16 2), <vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -458,9 +394,7 @@ define <vscale x 2 x i16> @vrsub_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %vb, <vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %vb, <vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -470,9 +404,7 @@ define <vscale x 2 x i16> @vrsub_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %vb, <vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> splat (i16 2), <vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -482,11 +414,7 @@ define <vscale x 2 x i16> @vrsub_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %vb, <vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> splat (i16 2), <vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -512,9 +440,7 @@ define <vscale x 4 x i16> @vrsub_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %vb, <vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %vb, <vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -524,9 +450,7 @@ define <vscale x 4 x i16> @vrsub_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %vb, <vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> splat (i16 2), <vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -536,11 +460,7 @@ define <vscale x 4 x i16> @vrsub_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %vb, <vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> splat (i16 2), <vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -566,9 +486,7 @@ define <vscale x 8 x i16> @vrsub_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %vb, <vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %vb, <vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -578,9 +496,7 @@ define <vscale x 8 x i16> @vrsub_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %vb, <vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> splat (i16 2), <vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -590,11 +506,7 @@ define <vscale x 8 x i16> @vrsub_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %vb, <vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> splat (i16 2), <vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -620,9 +532,7 @@ define <vscale x 16 x i16> @vrsub_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %vb, <vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %vb, <vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -632,9 +542,7 @@ define <vscale x 16 x i16> @vrsub_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %vb, <vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> splat (i16 2), <vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -644,11 +552,7 @@ define <vscale x 16 x i16> @vrsub_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %vb, <vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> splat (i16 2), <vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -674,9 +578,7 @@ define <vscale x 32 x i16> @vrsub_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -686,9 +588,7 @@ define <vscale x 32 x i16> @vrsub_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> splat (i16 2), <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -698,11 +598,7 @@ define <vscale x 32 x i16> @vrsub_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> splat (i16 2), <vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -728,9 +624,7 @@ define <vscale x 1 x i32> @vrsub_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -740,9 +634,7 @@ define <vscale x 1 x i32> @vrsub_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> splat (i32 2), <vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -752,11 +644,7 @@ define <vscale x 1 x i32> @vrsub_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> splat (i32 2), <vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -782,9 +670,7 @@ define <vscale x 2 x i32> @vrsub_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -794,9 +680,7 @@ define <vscale x 2 x i32> @vrsub_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> splat (i32 2), <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -806,11 +690,7 @@ define <vscale x 2 x i32> @vrsub_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> splat (i32 2), <vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -836,9 +716,7 @@ define <vscale x 4 x i32> @vrsub_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %vb, <vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %vb, <vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -848,9 +726,7 @@ define <vscale x 4 x i32> @vrsub_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %vb, <vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> splat (i32 2), <vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -860,11 +736,7 @@ define <vscale x 4 x i32> @vrsub_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %vb, <vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> splat (i32 2), <vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -890,9 +762,7 @@ define <vscale x 8 x i32> @vrsub_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -902,9 +772,7 @@ define <vscale x 8 x i32> @vrsub_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> splat (i32 2), <vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -914,11 +782,7 @@ define <vscale x 8 x i32> @vrsub_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> splat (i32 2), <vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -944,9 +808,7 @@ define <vscale x 16 x i32> @vrsub_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -956,9 +818,7 @@ define <vscale x 16 x i32> @vrsub_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> splat (i32 2), <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -968,11 +828,7 @@ define <vscale x 16 x i32> @vrsub_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> splat (i32 2), <vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1026,9 +882,7 @@ define <vscale x 1 x i64> @vrsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1038,9 +892,7 @@ define <vscale x 1 x i64> @vrsub_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> splat (i64 2), <vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1050,11 +902,7 @@ define <vscale x 1 x i64> @vrsub_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> splat (i64 2), <vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1108,9 +956,7 @@ define <vscale x 2 x i64> @vrsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %vb, <vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %vb, <vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1120,9 +966,7 @@ define <vscale x 2 x i64> @vrsub_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %vb, <vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> splat (i64 2), <vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1132,11 +976,7 @@ define <vscale x 2 x i64> @vrsub_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %vb, <vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> splat (i64 2), <vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1190,9 +1030,7 @@ define <vscale x 4 x i64> @vrsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %vb, <vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %vb, <vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1202,9 +1040,7 @@ define <vscale x 4 x i64> @vrsub_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %vb, <vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> splat (i64 2), <vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1214,11 +1050,7 @@ define <vscale x 4 x i64> @vrsub_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %vb, <vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> splat (i64 2), <vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1272,9 +1104,7 @@ define <vscale x 8 x i64> @vrsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1284,9 +1114,7 @@ define <vscale x 8 x i64> @vrsub_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> splat (i64 2), <vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1296,10 +1124,6 @@ define <vscale x 8 x i64> @vrsub_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vrsub.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> splat (i64 2), <vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll
index 38edd19096f845..6a8b801254057b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll
@@ -34,9 +34,7 @@ define <vscale x 1 x i8> @sadd_nxv1i8_vi(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
+  %v = call <vscale x 1 x i8> @llvm.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 5))
   ret <vscale x 1 x i8> %v
 }
 
@@ -70,9 +68,7 @@ define <vscale x 2 x i8> @sadd_nxv2i8_vi(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
+  %v = call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 5))
   ret <vscale x 2 x i8> %v
 }
 
@@ -106,9 +102,7 @@ define <vscale x 4 x i8> @sadd_nxv4i8_vi(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
+  %v = call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 5))
   ret <vscale x 4 x i8> %v
 }
 
@@ -142,9 +136,7 @@ define <vscale x 8 x i8> @sadd_nxv8i8_vi(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
+  %v = call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 5))
   ret <vscale x 8 x i8> %v
 }
 
@@ -178,9 +170,7 @@ define <vscale x 16 x i8> @sadd_nxv16i8_vi(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
+  %v = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 5))
   ret <vscale x 16 x i8> %v
 }
 
@@ -214,9 +204,7 @@ define <vscale x 32 x i8> @sadd_nxv32i8_vi(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
+  %v = call <vscale x 32 x i8> @llvm.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 5))
   ret <vscale x 32 x i8> %v
 }
 
@@ -250,9 +238,7 @@ define <vscale x 64 x i8> @sadd_nxv64i8_vi(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
+  %v = call <vscale x 64 x i8> @llvm.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 5))
   ret <vscale x 64 x i8> %v
 }
 
@@ -286,9 +272,7 @@ define <vscale x 1 x i16> @sadd_nxv1i16_vi(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
+  %v = call <vscale x 1 x i16> @llvm.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 5))
   ret <vscale x 1 x i16> %v
 }
 
@@ -322,9 +306,7 @@ define <vscale x 2 x i16> @sadd_nxv2i16_vi(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
+  %v = call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 5))
   ret <vscale x 2 x i16> %v
 }
 
@@ -358,9 +340,7 @@ define <vscale x 4 x i16> @sadd_nxv4i16_vi(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
+  %v = call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 5))
   ret <vscale x 4 x i16> %v
 }
 
@@ -394,9 +374,7 @@ define <vscale x 8 x i16> @sadd_nxv8i16_vi(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
+  %v = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 5))
   ret <vscale x 8 x i16> %v
 }
 
@@ -430,9 +408,7 @@ define <vscale x 16 x i16> @sadd_nxv16i16_vi(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
+  %v = call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 5))
   ret <vscale x 16 x i16> %v
 }
 
@@ -466,9 +442,7 @@ define <vscale x 32 x i16> @sadd_nxv32i16_vi(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
+  %v = call <vscale x 32 x i16> @llvm.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 5))
   ret <vscale x 32 x i16> %v
 }
 
@@ -502,9 +476,7 @@ define <vscale x 1 x i32> @sadd_nxv1i32_vi(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
+  %v = call <vscale x 1 x i32> @llvm.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 5))
   ret <vscale x 1 x i32> %v
 }
 
@@ -538,9 +510,7 @@ define <vscale x 2 x i32> @sadd_nxv2i32_vi(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
+  %v = call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 5))
   ret <vscale x 2 x i32> %v
 }
 
@@ -574,9 +544,7 @@ define <vscale x 4 x i32> @sadd_nxv4i32_vi(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
+  %v = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 5))
   ret <vscale x 4 x i32> %v
 }
 
@@ -610,9 +578,7 @@ define <vscale x 8 x i32> @sadd_nxv8i32_vi(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
+  %v = call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 5))
   ret <vscale x 8 x i32> %v
 }
 
@@ -646,9 +612,7 @@ define <vscale x 16 x i32> @sadd_nxv16i32_vi(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
+  %v = call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 5))
   ret <vscale x 16 x i32> %v
 }
 
@@ -695,9 +659,7 @@ define <vscale x 1 x i64> @sadd_nxv1i64_vi(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
+  %v = call <vscale x 1 x i64> @llvm.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 5))
   ret <vscale x 1 x i64> %v
 }
 
@@ -744,9 +706,7 @@ define <vscale x 2 x i64> @sadd_nxv2i64_vi(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
+  %v = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 5))
   ret <vscale x 2 x i64> %v
 }
 
@@ -793,9 +753,7 @@ define <vscale x 4 x i64> @sadd_nxv4i64_vi(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
+  %v = call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 5))
   ret <vscale x 4 x i64> %v
 }
 
@@ -842,8 +800,6 @@ define <vscale x 8 x i64> @sadd_nxv8i64_vi(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
+  %v = call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 5))
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index caaeae55ed78ed..f9ea5143cfcb70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -43,9 +43,7 @@ define <vscale x 1 x i8> @vsadd_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -81,9 +79,7 @@ define <vscale x 1 x i8> @vsadd_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -93,9 +89,7 @@ define <vscale x 1 x i8> @vsadd_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -105,11 +99,7 @@ define <vscale x 1 x i8> @vsadd_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -131,9 +121,7 @@ define <vscale x 2 x i8> @vsadd_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -157,9 +145,7 @@ define <vscale x 2 x i8> @vsadd_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -169,9 +155,7 @@ define <vscale x 2 x i8> @vsadd_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -181,11 +165,7 @@ define <vscale x 2 x i8> @vsadd_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -207,9 +187,7 @@ define <vscale x 3 x i8> @vsadd_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -233,9 +211,7 @@ define <vscale x 3 x i8> @vsadd_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -245,9 +221,7 @@ define <vscale x 3 x i8> @vsadd_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> %m, i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -257,11 +231,7 @@ define <vscale x 3 x i8> @vsadd_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -283,9 +253,7 @@ define <vscale x 4 x i8> @vsadd_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -309,9 +277,7 @@ define <vscale x 4 x i8> @vsadd_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -321,9 +287,7 @@ define <vscale x 4 x i8> @vsadd_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -333,11 +297,7 @@ define <vscale x 4 x i8> @vsadd_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -359,9 +319,7 @@ define <vscale x 8 x i8> @vsadd_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -385,9 +343,7 @@ define <vscale x 8 x i8> @vsadd_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -397,9 +353,7 @@ define <vscale x 8 x i8> @vsadd_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -409,11 +363,7 @@ define <vscale x 8 x i8> @vsadd_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -435,9 +385,7 @@ define <vscale x 16 x i8> @vsadd_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -461,9 +409,7 @@ define <vscale x 16 x i8> @vsadd_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -473,9 +419,7 @@ define <vscale x 16 x i8> @vsadd_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -485,11 +429,7 @@ define <vscale x 16 x i8> @vsadd_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -511,9 +451,7 @@ define <vscale x 32 x i8> @vsadd_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -537,9 +475,7 @@ define <vscale x 32 x i8> @vsadd_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -549,9 +485,7 @@ define <vscale x 32 x i8> @vsadd_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -561,11 +495,7 @@ define <vscale x 32 x i8> @vsadd_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -587,9 +517,7 @@ define <vscale x 64 x i8> @vsadd_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -613,9 +541,7 @@ define <vscale x 64 x i8> @vsadd_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -625,9 +551,7 @@ define <vscale x 64 x i8> @vsadd_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -637,11 +561,7 @@ define <vscale x 64 x i8> @vsadd_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -671,9 +591,7 @@ define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -695,11 +613,7 @@ define <vscale x 128 x i8> @vsadd_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -721,9 +635,7 @@ define <vscale x 1 x i16> @vsadd_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -747,9 +659,7 @@ define <vscale x 1 x i16> @vsadd_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -759,9 +669,7 @@ define <vscale x 1 x i16> @vsadd_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -771,11 +679,7 @@ define <vscale x 1 x i16> @vsadd_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -797,9 +701,7 @@ define <vscale x 2 x i16> @vsadd_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -823,9 +725,7 @@ define <vscale x 2 x i16> @vsadd_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -835,9 +735,7 @@ define <vscale x 2 x i16> @vsadd_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -847,11 +745,7 @@ define <vscale x 2 x i16> @vsadd_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -873,9 +767,7 @@ define <vscale x 4 x i16> @vsadd_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -899,9 +791,7 @@ define <vscale x 4 x i16> @vsadd_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -911,9 +801,7 @@ define <vscale x 4 x i16> @vsadd_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -923,11 +811,7 @@ define <vscale x 4 x i16> @vsadd_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -949,9 +833,7 @@ define <vscale x 8 x i16> @vsadd_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -975,9 +857,7 @@ define <vscale x 8 x i16> @vsadd_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -987,9 +867,7 @@ define <vscale x 8 x i16> @vsadd_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -999,11 +877,7 @@ define <vscale x 8 x i16> @vsadd_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1025,9 +899,7 @@ define <vscale x 16 x i16> @vsadd_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1051,9 +923,7 @@ define <vscale x 16 x i16> @vsadd_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1063,9 +933,7 @@ define <vscale x 16 x i16> @vsadd_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1075,11 +943,7 @@ define <vscale x 16 x i16> @vsadd_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1101,9 +965,7 @@ define <vscale x 32 x i16> @vsadd_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1127,9 +989,7 @@ define <vscale x 32 x i16> @vsadd_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1139,9 +999,7 @@ define <vscale x 32 x i16> @vsadd_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1151,11 +1009,7 @@ define <vscale x 32 x i16> @vsadd_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1177,9 +1031,7 @@ define <vscale x 1 x i32> @vsadd_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1203,9 +1055,7 @@ define <vscale x 1 x i32> @vsadd_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1215,9 +1065,7 @@ define <vscale x 1 x i32> @vsadd_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1227,11 +1075,7 @@ define <vscale x 1 x i32> @vsadd_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1253,9 +1097,7 @@ define <vscale x 2 x i32> @vsadd_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1279,9 +1121,7 @@ define <vscale x 2 x i32> @vsadd_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1291,9 +1131,7 @@ define <vscale x 2 x i32> @vsadd_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1303,11 +1141,7 @@ define <vscale x 2 x i32> @vsadd_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1329,9 +1163,7 @@ define <vscale x 4 x i32> @vsadd_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1355,9 +1187,7 @@ define <vscale x 4 x i32> @vsadd_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1367,9 +1197,7 @@ define <vscale x 4 x i32> @vsadd_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1379,11 +1207,7 @@ define <vscale x 4 x i32> @vsadd_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1405,9 +1229,7 @@ define <vscale x 8 x i32> @vsadd_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1431,9 +1253,7 @@ define <vscale x 8 x i32> @vsadd_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1443,9 +1263,7 @@ define <vscale x 8 x i32> @vsadd_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1455,11 +1273,7 @@ define <vscale x 8 x i32> @vsadd_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1481,9 +1295,7 @@ define <vscale x 16 x i32> @vsadd_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1507,9 +1319,7 @@ define <vscale x 16 x i32> @vsadd_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1519,9 +1329,7 @@ define <vscale x 16 x i32> @vsadd_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1531,11 +1339,7 @@ define <vscale x 16 x i32> @vsadd_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1566,9 +1370,7 @@ define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1590,11 +1392,7 @@ define <vscale x 32 x i32> @vsadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1616,9 +1414,7 @@ define <vscale x 1 x i64> @vsadd_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1670,9 +1466,7 @@ define <vscale x 1 x i64> @vsadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1682,9 +1476,7 @@ define <vscale x 1 x i64> @vsadd_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1694,11 +1486,7 @@ define <vscale x 1 x i64> @vsadd_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1720,9 +1508,7 @@ define <vscale x 2 x i64> @vsadd_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1774,9 +1560,7 @@ define <vscale x 2 x i64> @vsadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1786,9 +1570,7 @@ define <vscale x 2 x i64> @vsadd_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1798,11 +1580,7 @@ define <vscale x 2 x i64> @vsadd_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1824,9 +1602,7 @@ define <vscale x 4 x i64> @vsadd_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1878,9 +1654,7 @@ define <vscale x 4 x i64> @vsadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1890,9 +1664,7 @@ define <vscale x 4 x i64> @vsadd_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1902,11 +1674,7 @@ define <vscale x 4 x i64> @vsadd_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1928,9 +1696,7 @@ define <vscale x 8 x i64> @vsadd_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1982,9 +1748,7 @@ define <vscale x 8 x i64> @vsadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1994,9 +1758,7 @@ define <vscale x 8 x i64> @vsadd_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2006,10 +1768,6 @@ define <vscale x 8 x i64> @vsadd_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll
index ac520f89112086..4fe765c34ba6ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll
@@ -34,9 +34,7 @@ define <vscale x 1 x i8> @uadd_nxv1i8_vi(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
+  %v = call <vscale x 1 x i8> @llvm.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 8))
   ret <vscale x 1 x i8> %v
 }
 
@@ -70,9 +68,7 @@ define <vscale x 2 x i8> @uadd_nxv2i8_vi(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
+  %v = call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 8))
   ret <vscale x 2 x i8> %v
 }
 
@@ -106,9 +102,7 @@ define <vscale x 4 x i8> @uadd_nxv4i8_vi(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
+  %v = call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 8))
   ret <vscale x 4 x i8> %v
 }
 
@@ -142,9 +136,7 @@ define <vscale x 8 x i8> @uadd_nxv8i8_vi(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
+  %v = call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 8))
   ret <vscale x 8 x i8> %v
 }
 
@@ -178,9 +170,7 @@ define <vscale x 16 x i8> @uadd_nxv16i8_vi(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
+  %v = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 8))
   ret <vscale x 16 x i8> %v
 }
 
@@ -214,9 +204,7 @@ define <vscale x 32 x i8> @uadd_nxv32i8_vi(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
+  %v = call <vscale x 32 x i8> @llvm.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 8))
   ret <vscale x 32 x i8> %v
 }
 
@@ -250,9 +238,7 @@ define <vscale x 64 x i8> @uadd_nxv64i8_vi(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 8, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
+  %v = call <vscale x 64 x i8> @llvm.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 8))
   ret <vscale x 64 x i8> %v
 }
 
@@ -286,9 +272,7 @@ define <vscale x 1 x i16> @uadd_nxv1i16_vi(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
+  %v = call <vscale x 1 x i16> @llvm.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 8))
   ret <vscale x 1 x i16> %v
 }
 
@@ -322,9 +306,7 @@ define <vscale x 2 x i16> @uadd_nxv2i16_vi(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
+  %v = call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 8))
   ret <vscale x 2 x i16> %v
 }
 
@@ -358,9 +340,7 @@ define <vscale x 4 x i16> @uadd_nxv4i16_vi(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
+  %v = call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 8))
   ret <vscale x 4 x i16> %v
 }
 
@@ -394,9 +374,7 @@ define <vscale x 8 x i16> @uadd_nxv8i16_vi(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
+  %v = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 8))
   ret <vscale x 8 x i16> %v
 }
 
@@ -430,9 +408,7 @@ define <vscale x 16 x i16> @uadd_nxv16i16_vi(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
+  %v = call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 8))
   ret <vscale x 16 x i16> %v
 }
 
@@ -466,9 +442,7 @@ define <vscale x 32 x i16> @uadd_nxv32i16_vi(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 8, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
+  %v = call <vscale x 32 x i16> @llvm.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 8))
   ret <vscale x 32 x i16> %v
 }
 
@@ -502,9 +476,7 @@ define <vscale x 1 x i32> @uadd_nxv1i32_vi(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
+  %v = call <vscale x 1 x i32> @llvm.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 8))
   ret <vscale x 1 x i32> %v
 }
 
@@ -538,9 +510,7 @@ define <vscale x 2 x i32> @uadd_nxv2i32_vi(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
+  %v = call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 8))
   ret <vscale x 2 x i32> %v
 }
 
@@ -574,9 +544,7 @@ define <vscale x 4 x i32> @uadd_nxv4i32_vi(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
+  %v = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 8))
   ret <vscale x 4 x i32> %v
 }
 
@@ -610,9 +578,7 @@ define <vscale x 8 x i32> @uadd_nxv8i32_vi(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
+  %v = call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 8))
   ret <vscale x 8 x i32> %v
 }
 
@@ -646,9 +612,7 @@ define <vscale x 16 x i32> @uadd_nxv16i32_vi(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 8, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
+  %v = call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 8))
   ret <vscale x 16 x i32> %v
 }
 
@@ -695,9 +659,7 @@ define <vscale x 1 x i64> @uadd_nxv1i64_vi(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 8, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
+  %v = call <vscale x 1 x i64> @llvm.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 8))
   ret <vscale x 1 x i64> %v
 }
 
@@ -744,9 +706,7 @@ define <vscale x 2 x i64> @uadd_nxv2i64_vi(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 8, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
+  %v = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 8))
   ret <vscale x 2 x i64> %v
 }
 
@@ -793,9 +753,7 @@ define <vscale x 4 x i64> @uadd_nxv4i64_vi(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 8, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
+  %v = call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 8))
   ret <vscale x 4 x i64> %v
 }
 
@@ -842,8 +800,6 @@ define <vscale x 8 x i64> @uadd_nxv8i64_vi(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 8, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
+  %v = call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 8))
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index c0779e508c0a91..745b93b257085a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -42,9 +42,7 @@ define <vscale x 1 x i8> @vsaddu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -80,9 +78,7 @@ define <vscale x 1 x i8> @vsaddu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -92,9 +88,7 @@ define <vscale x 1 x i8> @vsaddu_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -104,11 +98,7 @@ define <vscale x 1 x i8> @vsaddu_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -130,9 +120,7 @@ define <vscale x 2 x i8> @vsaddu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -156,9 +144,7 @@ define <vscale x 2 x i8> @vsaddu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -168,9 +154,7 @@ define <vscale x 2 x i8> @vsaddu_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -180,11 +164,7 @@ define <vscale x 2 x i8> @vsaddu_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -206,9 +186,7 @@ define <vscale x 3 x i8> @vsaddu_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -232,9 +210,7 @@ define <vscale x 3 x i8> @vsaddu_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -244,9 +220,7 @@ define <vscale x 3 x i8> @vsaddu_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> %m, i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -256,11 +230,7 @@ define <vscale x 3 x i8> @vsaddu_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -282,9 +252,7 @@ define <vscale x 4 x i8> @vsaddu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -308,9 +276,7 @@ define <vscale x 4 x i8> @vsaddu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -320,9 +286,7 @@ define <vscale x 4 x i8> @vsaddu_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -332,11 +296,7 @@ define <vscale x 4 x i8> @vsaddu_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -358,9 +318,7 @@ define <vscale x 8 x i8> @vsaddu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -384,9 +342,7 @@ define <vscale x 8 x i8> @vsaddu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -396,9 +352,7 @@ define <vscale x 8 x i8> @vsaddu_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -408,11 +362,7 @@ define <vscale x 8 x i8> @vsaddu_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -434,9 +384,7 @@ define <vscale x 16 x i8> @vsaddu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -460,9 +408,7 @@ define <vscale x 16 x i8> @vsaddu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -472,9 +418,7 @@ define <vscale x 16 x i8> @vsaddu_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -484,11 +428,7 @@ define <vscale x 16 x i8> @vsaddu_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -510,9 +450,7 @@ define <vscale x 32 x i8> @vsaddu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -536,9 +474,7 @@ define <vscale x 32 x i8> @vsaddu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -548,9 +484,7 @@ define <vscale x 32 x i8> @vsaddu_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -560,11 +494,7 @@ define <vscale x 32 x i8> @vsaddu_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -586,9 +516,7 @@ define <vscale x 64 x i8> @vsaddu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -612,9 +540,7 @@ define <vscale x 64 x i8> @vsaddu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -624,9 +550,7 @@ define <vscale x 64 x i8> @vsaddu_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 6
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -636,11 +560,7 @@ define <vscale x 64 x i8> @vsaddu_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -670,9 +590,7 @@ define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -694,11 +612,7 @@ define <vscale x 128 x i8> @vsaddu_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -720,9 +634,7 @@ define <vscale x 1 x i16> @vsaddu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -746,9 +658,7 @@ define <vscale x 1 x i16> @vsaddu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -758,9 +668,7 @@ define <vscale x 1 x i16> @vsaddu_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -770,11 +678,7 @@ define <vscale x 1 x i16> @vsaddu_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -796,9 +700,7 @@ define <vscale x 2 x i16> @vsaddu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -822,9 +724,7 @@ define <vscale x 2 x i16> @vsaddu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -834,9 +734,7 @@ define <vscale x 2 x i16> @vsaddu_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -846,11 +744,7 @@ define <vscale x 2 x i16> @vsaddu_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -872,9 +766,7 @@ define <vscale x 4 x i16> @vsaddu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -898,9 +790,7 @@ define <vscale x 4 x i16> @vsaddu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -910,9 +800,7 @@ define <vscale x 4 x i16> @vsaddu_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -922,11 +810,7 @@ define <vscale x 4 x i16> @vsaddu_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -948,9 +832,7 @@ define <vscale x 8 x i16> @vsaddu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -974,9 +856,7 @@ define <vscale x 8 x i16> @vsaddu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -986,9 +866,7 @@ define <vscale x 8 x i16> @vsaddu_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -998,11 +876,7 @@ define <vscale x 8 x i16> @vsaddu_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1024,9 +898,7 @@ define <vscale x 16 x i16> @vsaddu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1050,9 +922,7 @@ define <vscale x 16 x i16> @vsaddu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1062,9 +932,7 @@ define <vscale x 16 x i16> @vsaddu_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1074,11 +942,7 @@ define <vscale x 16 x i16> @vsaddu_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1100,9 +964,7 @@ define <vscale x 32 x i16> @vsaddu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1126,9 +988,7 @@ define <vscale x 32 x i16> @vsaddu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1138,9 +998,7 @@ define <vscale x 32 x i16> @vsaddu_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1150,11 +1008,7 @@ define <vscale x 32 x i16> @vsaddu_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1176,9 +1030,7 @@ define <vscale x 1 x i32> @vsaddu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1202,9 +1054,7 @@ define <vscale x 1 x i32> @vsaddu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1214,9 +1064,7 @@ define <vscale x 1 x i32> @vsaddu_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1226,11 +1074,7 @@ define <vscale x 1 x i32> @vsaddu_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1252,9 +1096,7 @@ define <vscale x 2 x i32> @vsaddu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1278,9 +1120,7 @@ define <vscale x 2 x i32> @vsaddu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1290,9 +1130,7 @@ define <vscale x 2 x i32> @vsaddu_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1302,11 +1140,7 @@ define <vscale x 2 x i32> @vsaddu_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1328,9 +1162,7 @@ define <vscale x 4 x i32> @vsaddu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1354,9 +1186,7 @@ define <vscale x 4 x i32> @vsaddu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1366,9 +1196,7 @@ define <vscale x 4 x i32> @vsaddu_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1378,11 +1206,7 @@ define <vscale x 4 x i32> @vsaddu_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1404,9 +1228,7 @@ define <vscale x 8 x i32> @vsaddu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1430,9 +1252,7 @@ define <vscale x 8 x i32> @vsaddu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1442,9 +1262,7 @@ define <vscale x 8 x i32> @vsaddu_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1454,11 +1272,7 @@ define <vscale x 8 x i32> @vsaddu_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1480,9 +1294,7 @@ define <vscale x 16 x i32> @vsaddu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1506,9 +1318,7 @@ define <vscale x 16 x i32> @vsaddu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1518,9 +1328,7 @@ define <vscale x 16 x i32> @vsaddu_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1530,11 +1338,7 @@ define <vscale x 16 x i32> @vsaddu_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1565,9 +1369,7 @@ define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1589,11 +1391,7 @@ define <vscale x 32 x i32> @vsaddu_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1615,9 +1413,7 @@ define <vscale x 1 x i64> @vsaddu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1669,9 +1465,7 @@ define <vscale x 1 x i64> @vsaddu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i6
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1681,9 +1475,7 @@ define <vscale x 1 x i64> @vsaddu_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1693,11 +1485,7 @@ define <vscale x 1 x i64> @vsaddu_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1719,9 +1507,7 @@ define <vscale x 2 x i64> @vsaddu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1773,9 +1559,7 @@ define <vscale x 2 x i64> @vsaddu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i6
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1785,9 +1569,7 @@ define <vscale x 2 x i64> @vsaddu_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1797,11 +1579,7 @@ define <vscale x 2 x i64> @vsaddu_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1823,9 +1601,7 @@ define <vscale x 4 x i64> @vsaddu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1877,9 +1653,7 @@ define <vscale x 4 x i64> @vsaddu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i6
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1889,9 +1663,7 @@ define <vscale x 4 x i64> @vsaddu_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1901,11 +1673,7 @@ define <vscale x 4 x i64> @vsaddu_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1927,9 +1695,7 @@ define <vscale x 8 x i64> @vsaddu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1981,9 +1747,7 @@ define <vscale x 8 x i64> @vsaddu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i6
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1993,9 +1757,7 @@ define <vscale x 8 x i64> @vsaddu_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2005,10 +1767,6 @@ define <vscale x 8 x i64> @vsaddu_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 17c362fc0a1aeb..4457c1002acc7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -138,9 +138,7 @@ define <vscale x 8 x half> @vfmerge_zv_nxv8f16(<vscale x 8 x half> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x half> poison, half zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> %splat, <vscale x 8 x half> %va
+  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x half> splat (half zeroinitializer), <vscale x 8 x half> %va
   ret <vscale x 8 x half> %vc
 }
 
@@ -148,9 +146,7 @@ define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <v
 ; CHECK-LABEL: vmerge_truelhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  %mhead = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %mtrue, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
+  %vc = select <vscale x 8 x i1> splat (i1 1), <vscale x 8 x half> %va, <vscale x 8 x half> %vb
   ret <vscale x 8 x half> %vc
 }
 
@@ -322,9 +318,7 @@ define <vscale x 8 x float> @vfmerge_zv_nxv8f32(<vscale x 8 x float> %va, <vscal
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x float> %splat, <vscale x 8 x float> %va
+  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x float> splat (float zeroinitializer), <vscale x 8 x float> %va
   ret <vscale x 8 x float> %vc
 }
 
@@ -444,9 +438,7 @@ define <vscale x 8 x double> @vfmerge_zv_nxv8f64(<vscale x 8 x double> %va, <vsc
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x double> poison, double zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> %splat, <vscale x 8 x double> %va
+  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x double> splat (double zeroinitializer), <vscale x 8 x double> %va
   ret <vscale x 8 x double> %vc
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
index 19c7d599cb068c..2715ec78bd7949 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int.ll
@@ -32,9 +32,7 @@ define <vscale x 1 x i8> @vmerge_iv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> %splat, <vscale x 1 x i8> %va
+  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i8> splat (i8 3), <vscale x 1 x i8> %va
   ret <vscale x 1 x i8> %vc
 }
 
@@ -66,9 +64,7 @@ define <vscale x 2 x i8> @vmerge_iv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> %splat, <vscale x 2 x i8> %va
+  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i8> splat (i8 3), <vscale x 2 x i8> %va
   ret <vscale x 2 x i8> %vc
 }
 
@@ -100,9 +96,7 @@ define <vscale x 3 x i8> @vmerge_iv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> %splat, <vscale x 3 x i8> %va
+  %vc = select <vscale x 3 x i1> %cond, <vscale x 3 x i8> splat (i8 3), <vscale x 3 x i8> %va
   ret <vscale x 3 x i8> %vc
 }
 
@@ -134,9 +128,7 @@ define <vscale x 4 x i8> @vmerge_iv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i8> %splat, <vscale x 4 x i8> %va
+  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i8> splat (i8 3), <vscale x 4 x i8> %va
   ret <vscale x 4 x i8> %vc
 }
 
@@ -168,9 +160,7 @@ define <vscale x 8 x i8> @vmerge_iv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i8> %splat, <vscale x 8 x i8> %va
+  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i8> splat (i8 3), <vscale x 8 x i8> %va
   ret <vscale x 8 x i8> %vc
 }
 
@@ -202,9 +192,7 @@ define <vscale x 16 x i8> @vmerge_iv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i8> %splat, <vscale x 16 x i8> %va
+  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i8> splat (i8 3), <vscale x 16 x i8> %va
   ret <vscale x 16 x i8> %vc
 }
 
@@ -236,9 +224,7 @@ define <vscale x 32 x i8> @vmerge_iv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 3
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i8> %splat, <vscale x 32 x i8> %va
+  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i8> splat (i8 3), <vscale x 32 x i8> %va
   ret <vscale x 32 x i8> %vc
 }
 
@@ -270,9 +256,7 @@ define <vscale x 64 x i8> @vmerge_iv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 6
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 3, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = select <vscale x 64 x i1> %cond, <vscale x 64 x i8> %splat, <vscale x 64 x i8> %va
+  %vc = select <vscale x 64 x i1> %cond, <vscale x 64 x i8> splat (i8 3), <vscale x 64 x i8> %va
   ret <vscale x 64 x i8> %vc
 }
 
@@ -304,9 +288,7 @@ define <vscale x 1 x i16> @vmerge_iv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i16> %splat, <vscale x 1 x i16> %va
+  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i16> splat (i16 3), <vscale x 1 x i16> %va
   ret <vscale x 1 x i16> %vc
 }
 
@@ -338,9 +320,7 @@ define <vscale x 2 x i16> @vmerge_iv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i16> %splat, <vscale x 2 x i16> %va
+  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i16> splat (i16 3), <vscale x 2 x i16> %va
   ret <vscale x 2 x i16> %vc
 }
 
@@ -372,9 +352,7 @@ define <vscale x 4 x i16> @vmerge_iv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i16> %splat, <vscale x 4 x i16> %va
+  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i16> splat (i16 3), <vscale x 4 x i16> %va
   ret <vscale x 4 x i16> %vc
 }
 
@@ -406,9 +384,7 @@ define <vscale x 8 x i16> @vmerge_iv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i16> %splat, <vscale x 8 x i16> %va
+  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i16> splat (i16 3), <vscale x 8 x i16> %va
   ret <vscale x 8 x i16> %vc
 }
 
@@ -440,9 +416,7 @@ define <vscale x 16 x i16> @vmerge_iv_nxv16i16(<vscale x 16 x i16> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i16> %splat, <vscale x 16 x i16> %va
+  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i16> splat (i16 3), <vscale x 16 x i16> %va
   ret <vscale x 16 x i16> %vc
 }
 
@@ -474,9 +448,7 @@ define <vscale x 32 x i16> @vmerge_iv_nxv32i16(<vscale x 32 x i16> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 3, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i16> %splat, <vscale x 32 x i16> %va
+  %vc = select <vscale x 32 x i1> %cond, <vscale x 32 x i16> splat (i16 3), <vscale x 32 x i16> %va
   ret <vscale x 32 x i16> %vc
 }
 
@@ -508,9 +480,7 @@ define <vscale x 1 x i32> @vmerge_iv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i32> %splat, <vscale x 1 x i32> %va
+  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i32> splat (i32 3), <vscale x 1 x i32> %va
   ret <vscale x 1 x i32> %vc
 }
 
@@ -542,9 +512,7 @@ define <vscale x 2 x i32> @vmerge_iv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i32> %splat, <vscale x 2 x i32> %va
+  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i32> splat (i32 3), <vscale x 2 x i32> %va
   ret <vscale x 2 x i32> %vc
 }
 
@@ -576,9 +544,7 @@ define <vscale x 4 x i32> @vmerge_iv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i32> %splat, <vscale x 4 x i32> %va
+  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i32> splat (i32 3), <vscale x 4 x i32> %va
   ret <vscale x 4 x i32> %vc
 }
 
@@ -610,9 +576,7 @@ define <vscale x 8 x i32> @vmerge_iv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i32> %splat, <vscale x 8 x i32> %va
+  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i32> splat (i32 3), <vscale x 8 x i32> %va
   ret <vscale x 8 x i32> %vc
 }
 
@@ -644,9 +608,7 @@ define <vscale x 16 x i32> @vmerge_iv_nxv16i32(<vscale x 16 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 3, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i32> %splat, <vscale x 16 x i32> %va
+  %vc = select <vscale x 16 x i1> %cond, <vscale x 16 x i32> splat (i32 3), <vscale x 16 x i32> %va
   ret <vscale x 16 x i32> %vc
 }
 
@@ -690,9 +652,7 @@ define <vscale x 1 x i64> @vmerge_iv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 3, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> %splat, <vscale x 1 x i64> %va
+  %vc = select <vscale x 1 x i1> %cond, <vscale x 1 x i64> splat (i64 3), <vscale x 1 x i64> %va
   ret <vscale x 1 x i64> %vc
 }
 
@@ -736,9 +696,7 @@ define <vscale x 2 x i64> @vmerge_iv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> %splat, <vscale x 2 x i64> %va
+  %vc = select <vscale x 2 x i1> %cond, <vscale x 2 x i64> splat (i64 3), <vscale x 2 x i64> %va
   ret <vscale x 2 x i64> %vc
 }
 
@@ -782,9 +740,7 @@ define <vscale x 4 x i64> @vmerge_iv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 3, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> %splat, <vscale x 4 x i64> %va
+  %vc = select <vscale x 4 x i1> %cond, <vscale x 4 x i64> splat (i64 3), <vscale x 4 x i64> %va
   ret <vscale x 4 x i64> %vc
 }
 
@@ -828,9 +784,7 @@ define <vscale x 8 x i64> @vmerge_iv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmerge.vim v8, v8, 3, v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> %splat, <vscale x 8 x i64> %va
+  %vc = select <vscale x 8 x i1> %cond, <vscale x 8 x i64> splat (i64 3), <vscale x 8 x i64> %va
   ret <vscale x 8 x i64> %vc
 }
 
@@ -838,9 +792,7 @@ define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vsc
 ; CHECK-LABEL: vmerge_truelhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  %mhead = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
-  %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = select <vscale x 8 x i1> %mtrue, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
+  %vc = select <vscale x 8 x i1> splat (i1 1), <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
   ret <vscale x 8 x i64> %vc
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll
index 93f43f4578ddb5..5b101cf673541b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll
@@ -20,9 +20,7 @@ define <vscale x 1 x i8> @vshl_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = shl <vscale x 1 x i8> %va, %splat
+  %vc = shl <vscale x 1 x i8> %va, splat (i8 6)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -44,9 +42,7 @@ define <vscale x 2 x i8> @vshl_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = shl <vscale x 2 x i8> %va, %splat
+  %vc = shl <vscale x 2 x i8> %va, splat (i8 6)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -68,9 +64,7 @@ define <vscale x 4 x i8> @vshl_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = shl <vscale x 4 x i8> %va, %splat
+  %vc = shl <vscale x 4 x i8> %va, splat (i8 6)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -92,9 +86,7 @@ define <vscale x 8 x i8> @vshl_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = shl <vscale x 8 x i8> %va, %splat
+  %vc = shl <vscale x 8 x i8> %va, splat (i8 6)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -116,9 +108,7 @@ define <vscale x 16 x i8> @vshl_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = shl <vscale x 16 x i8> %va, %splat
+  %vc = shl <vscale x 16 x i8> %va, splat (i8 6)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -140,9 +130,7 @@ define <vscale x 32 x i8> @vshl_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = shl <vscale x 32 x i8> %va, %splat
+  %vc = shl <vscale x 32 x i8> %va, splat (i8 6)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -164,9 +152,7 @@ define <vscale x 64 x i8> @vshl_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = shl <vscale x 64 x i8> %va, %splat
+  %vc = shl <vscale x 64 x i8> %va, splat (i8 6)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -188,9 +174,7 @@ define <vscale x 1 x i16> @vshl_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = shl <vscale x 1 x i16> %va, %splat
+  %vc = shl <vscale x 1 x i16> %va, splat (i16 6)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -212,9 +196,7 @@ define <vscale x 2 x i16> @vshl_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = shl <vscale x 2 x i16> %va, %splat
+  %vc = shl <vscale x 2 x i16> %va, splat (i16 6)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -236,9 +218,7 @@ define <vscale x 4 x i16> @vshl_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = shl <vscale x 4 x i16> %va, %splat
+  %vc = shl <vscale x 4 x i16> %va, splat (i16 6)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -260,9 +240,7 @@ define <vscale x 8 x i16> @vshl_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = shl <vscale x 8 x i16> %va, %splat
+  %vc = shl <vscale x 8 x i16> %va, splat (i16 6)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -284,9 +262,7 @@ define <vscale x 16 x i16> @vshl_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = shl <vscale x 16 x i16> %va, %splat
+  %vc = shl <vscale x 16 x i16> %va, splat (i16 6)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -308,9 +284,7 @@ define <vscale x 32 x i16> @vshl_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = shl <vscale x 32 x i16> %va, %splat
+  %vc = shl <vscale x 32 x i16> %va, splat (i16 6)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -332,9 +306,7 @@ define <vscale x 1 x i32> @vshl_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = shl <vscale x 1 x i32> %va, %splat
+  %vc = shl <vscale x 1 x i32> %va, splat (i32 31)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -356,9 +328,7 @@ define <vscale x 2 x i32> @vshl_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = shl <vscale x 2 x i32> %va, %splat
+  %vc = shl <vscale x 2 x i32> %va, splat (i32 31)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -380,9 +350,7 @@ define <vscale x 4 x i32> @vshl_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = shl <vscale x 4 x i32> %va, %splat
+  %vc = shl <vscale x 4 x i32> %va, splat (i32 31)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -404,9 +372,7 @@ define <vscale x 8 x i32> @vshl_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = shl <vscale x 8 x i32> %va, %splat
+  %vc = shl <vscale x 8 x i32> %va, splat (i32 31)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -428,9 +394,7 @@ define <vscale x 16 x i32> @vshl_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = shl <vscale x 16 x i32> %va, %splat
+  %vc = shl <vscale x 16 x i32> %va, splat (i32 31)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -452,9 +416,7 @@ define <vscale x 1 x i64> @vshl_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = shl <vscale x 1 x i64> %va, %splat
+  %vc = shl <vscale x 1 x i64> %va, splat (i64 31)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -465,9 +427,7 @@ define <vscale x 1 x i64> @vshl_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = shl <vscale x 1 x i64> %va, %splat
+  %vc = shl <vscale x 1 x i64> %va, splat (i64 32)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -477,9 +437,7 @@ define <vscale x 1 x i64> @vshl_vx_nxv1i64_2(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = shl <vscale x 1 x i64> %va, %splat
+  %vc = shl <vscale x 1 x i64> %va, splat (i64 1)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -501,9 +459,7 @@ define <vscale x 2 x i64> @vshl_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = shl <vscale x 2 x i64> %va, %splat
+  %vc = shl <vscale x 2 x i64> %va, splat (i64 31)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -514,9 +470,7 @@ define <vscale x 2 x i64> @vshl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = shl <vscale x 2 x i64> %va, %splat
+  %vc = shl <vscale x 2 x i64> %va, splat (i64 32)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -526,9 +480,7 @@ define <vscale x 2 x i64> @vshl_vx_nxv2i64_2(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = shl <vscale x 2 x i64> %va, %splat
+  %vc = shl <vscale x 2 x i64> %va, splat (i64 1)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -550,9 +502,7 @@ define <vscale x 4 x i64> @vshl_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = shl <vscale x 4 x i64> %va, %splat
+  %vc = shl <vscale x 4 x i64> %va, splat (i64 31)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -563,9 +513,7 @@ define <vscale x 4 x i64> @vshl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = shl <vscale x 4 x i64> %va, %splat
+  %vc = shl <vscale x 4 x i64> %va, splat (i64 32)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -575,9 +523,7 @@ define <vscale x 4 x i64> @vshl_vx_nxv4i64_2(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = shl <vscale x 4 x i64> %va, %splat
+  %vc = shl <vscale x 4 x i64> %va, splat (i64 1)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -599,9 +545,7 @@ define <vscale x 8 x i64> @vshl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = shl <vscale x 8 x i64> %va, %splat
+  %vc = shl <vscale x 8 x i64> %va, splat (i64 31)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -612,9 +556,7 @@ define <vscale x 8 x i64> @vshl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = shl <vscale x 8 x i64> %va, %splat
+  %vc = shl <vscale x 8 x i64> %va, splat (i64 32)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -624,9 +566,7 @@ define <vscale x 8 x i64> @vshl_vx_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = shl <vscale x 8 x i64> %va, %splat
+  %vc = shl <vscale x 8 x i64> %va, splat (i64 1)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -660,9 +600,7 @@ define <vscale x 8 x i32> @vshl_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 31, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 31), <vscale x 8 x i32> zeroinitializer
   %vc = shl <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
index 9ab5218e897e71..c04d5ea2da3c1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
@@ -40,9 +40,7 @@ define <vscale x 1 x i8> @vsll_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -66,9 +64,7 @@ define <vscale x 1 x i8> @vsll_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -78,9 +74,7 @@ define <vscale x 1 x i8> @vsll_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 3), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -90,11 +84,7 @@ define <vscale x 1 x i8> @vsll_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 3), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -116,9 +106,7 @@ define <vscale x 2 x i8> @vsll_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -142,9 +130,7 @@ define <vscale x 2 x i8> @vsll_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -154,9 +140,7 @@ define <vscale x 2 x i8> @vsll_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 3), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -166,11 +150,7 @@ define <vscale x 2 x i8> @vsll_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 3), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -192,9 +172,7 @@ define <vscale x 4 x i8> @vsll_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -218,9 +196,7 @@ define <vscale x 4 x i8> @vsll_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -230,9 +206,7 @@ define <vscale x 4 x i8> @vsll_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 3), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -242,11 +216,7 @@ define <vscale x 4 x i8> @vsll_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 3), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -280,9 +250,7 @@ define <vscale x 8 x i8> @vsll_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -306,9 +274,7 @@ define <vscale x 8 x i8> @vsll_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -318,9 +284,7 @@ define <vscale x 8 x i8> @vsll_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 3), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -330,11 +294,7 @@ define <vscale x 8 x i8> @vsll_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 3), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -356,9 +316,7 @@ define <vscale x 16 x i8> @vsll_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -382,9 +340,7 @@ define <vscale x 16 x i8> @vsll_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -394,9 +350,7 @@ define <vscale x 16 x i8> @vsll_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 3), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -406,11 +360,7 @@ define <vscale x 16 x i8> @vsll_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 3), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -432,9 +382,7 @@ define <vscale x 32 x i8> @vsll_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -458,9 +406,7 @@ define <vscale x 32 x i8> @vsll_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -470,9 +416,7 @@ define <vscale x 32 x i8> @vsll_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 3), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -482,11 +426,7 @@ define <vscale x 32 x i8> @vsll_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 3), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -508,9 +448,7 @@ define <vscale x 64 x i8> @vsll_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -534,9 +472,7 @@ define <vscale x 64 x i8> @vsll_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -546,9 +482,7 @@ define <vscale x 64 x i8> @vsll_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 3), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -558,11 +492,7 @@ define <vscale x 64 x i8> @vsll_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 3, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 3), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -584,9 +514,7 @@ define <vscale x 1 x i16> @vsll_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -610,9 +538,7 @@ define <vscale x 1 x i16> @vsll_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -622,9 +548,7 @@ define <vscale x 1 x i16> @vsll_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 3), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -634,11 +558,7 @@ define <vscale x 1 x i16> @vsll_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 3), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -660,9 +580,7 @@ define <vscale x 2 x i16> @vsll_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -686,9 +604,7 @@ define <vscale x 2 x i16> @vsll_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -698,9 +614,7 @@ define <vscale x 2 x i16> @vsll_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 3), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -710,11 +624,7 @@ define <vscale x 2 x i16> @vsll_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 3), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -736,9 +646,7 @@ define <vscale x 4 x i16> @vsll_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -762,9 +670,7 @@ define <vscale x 4 x i16> @vsll_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -774,9 +680,7 @@ define <vscale x 4 x i16> @vsll_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 3), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -786,11 +690,7 @@ define <vscale x 4 x i16> @vsll_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 3), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -812,9 +712,7 @@ define <vscale x 8 x i16> @vsll_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -838,9 +736,7 @@ define <vscale x 8 x i16> @vsll_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -850,9 +746,7 @@ define <vscale x 8 x i16> @vsll_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 3), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -862,11 +756,7 @@ define <vscale x 8 x i16> @vsll_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 3), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -888,9 +778,7 @@ define <vscale x 16 x i16> @vsll_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -914,9 +802,7 @@ define <vscale x 16 x i16> @vsll_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -926,9 +812,7 @@ define <vscale x 16 x i16> @vsll_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 3), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -938,11 +822,7 @@ define <vscale x 16 x i16> @vsll_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 3), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -964,9 +844,7 @@ define <vscale x 32 x i16> @vsll_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -990,9 +868,7 @@ define <vscale x 32 x i16> @vsll_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1002,9 +878,7 @@ define <vscale x 32 x i16> @vsll_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 3), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1014,11 +888,7 @@ define <vscale x 32 x i16> @vsll_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 3, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 3), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1040,9 +910,7 @@ define <vscale x 1 x i32> @vsll_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1066,9 +934,7 @@ define <vscale x 1 x i32> @vsll_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1078,9 +944,7 @@ define <vscale x 1 x i32> @vsll_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 3), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1090,11 +954,7 @@ define <vscale x 1 x i32> @vsll_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 3), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1116,9 +976,7 @@ define <vscale x 2 x i32> @vsll_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1142,9 +1000,7 @@ define <vscale x 2 x i32> @vsll_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1154,9 +1010,7 @@ define <vscale x 2 x i32> @vsll_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 3), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1166,11 +1020,7 @@ define <vscale x 2 x i32> @vsll_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 3), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1192,9 +1042,7 @@ define <vscale x 4 x i32> @vsll_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1218,9 +1066,7 @@ define <vscale x 4 x i32> @vsll_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1230,9 +1076,7 @@ define <vscale x 4 x i32> @vsll_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 3), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1242,11 +1086,7 @@ define <vscale x 4 x i32> @vsll_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 3), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1268,9 +1108,7 @@ define <vscale x 8 x i32> @vsll_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1294,9 +1132,7 @@ define <vscale x 8 x i32> @vsll_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1306,9 +1142,7 @@ define <vscale x 8 x i32> @vsll_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 3), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1318,11 +1152,7 @@ define <vscale x 8 x i32> @vsll_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 3), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1344,9 +1174,7 @@ define <vscale x 16 x i32> @vsll_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1370,9 +1198,7 @@ define <vscale x 16 x i32> @vsll_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1382,9 +1208,7 @@ define <vscale x 16 x i32> @vsll_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 3), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1394,11 +1218,7 @@ define <vscale x 16 x i32> @vsll_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 3, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 3), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1420,9 +1240,7 @@ define <vscale x 1 x i64> @vsll_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1458,9 +1276,7 @@ define <vscale x 1 x i64> @vsll_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1470,9 +1286,7 @@ define <vscale x 1 x i64> @vsll_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 3), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1482,11 +1296,7 @@ define <vscale x 1 x i64> @vsll_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 3), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1508,9 +1318,7 @@ define <vscale x 2 x i64> @vsll_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1546,9 +1354,7 @@ define <vscale x 2 x i64> @vsll_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1558,9 +1364,7 @@ define <vscale x 2 x i64> @vsll_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 3), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1570,11 +1374,7 @@ define <vscale x 2 x i64> @vsll_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 3), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1596,9 +1396,7 @@ define <vscale x 4 x i64> @vsll_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1634,9 +1432,7 @@ define <vscale x 4 x i64> @vsll_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1646,9 +1442,7 @@ define <vscale x 4 x i64> @vsll_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 3), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1658,11 +1452,7 @@ define <vscale x 4 x i64> @vsll_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 3), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1684,9 +1474,7 @@ define <vscale x 8 x i64> @vsll_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1722,9 +1510,7 @@ define <vscale x 8 x i64> @vsll_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1734,9 +1520,7 @@ define <vscale x 8 x i64> @vsll_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 3), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1746,10 +1530,6 @@ define <vscale x 8 x i64> @vsll_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsll.vi v8, v8, 3
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 3), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
index bb0f26046bdb33..707ef8a94432d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
@@ -25,9 +25,7 @@ define <vscale x 8 x half> @vsplat_zero_nxv8f16() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x half> poison, half zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x half> %splat
+  ret <vscale x 8 x half> splat (half zeroinitializer)
 }
 
 define <vscale x 8 x float> @vsplat_nxv8f32(float %f) {
@@ -47,9 +45,7 @@ define <vscale x 8 x float> @vsplat_zero_nxv8f32() {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x float> %splat
+  ret <vscale x 8 x float> splat (float zeroinitializer)
 }
 
 define <vscale x 8 x double> @vsplat_nxv8f64(double %f) {
@@ -69,9 +65,7 @@ define <vscale x 8 x double> @vsplat_zero_nxv8f64() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x double> poison, double zeroinitializer, i32 0
-  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x double> %splat
+  ret <vscale x 8 x double> splat (double zeroinitializer)
 }
 
 ; Test that we fold this to a vlse with 0 stride.
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
index f462f242f06852..09a713a6826cbf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
@@ -8,9 +8,7 @@ define <vscale x 1 x i1> @vsplat_nxv1i1_0() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 0, i32 0
-  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  ret <vscale x 1 x i1> %splat
+  ret <vscale x 1 x i1> splat (i1 0)
 }
 
 define <vscale x 1 x i1> @vsplat_nxv1i1_1() {
@@ -19,9 +17,7 @@ define <vscale x 1 x i1> @vsplat_nxv1i1_1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  ret <vscale x 1 x i1> %splat
+  ret <vscale x 1 x i1> splat (i1 -1)
 }
 
 define <vscale x 1 x i1> @vsplat_nxv1i1_2(i1 %x) {
@@ -58,9 +54,7 @@ define <vscale x 2 x i1> @vsplat_nxv2i1_0() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 0, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x i1> %splat
+  ret <vscale x 2 x i1> splat (i1 0)
 }
 
 define <vscale x 2 x i1> @vsplat_nxv2i1_1() {
@@ -69,9 +63,7 @@ define <vscale x 2 x i1> @vsplat_nxv2i1_1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  ret <vscale x 2 x i1> %splat
+  ret <vscale x 2 x i1> splat (i1 -1)
 }
 
 define <vscale x 2 x i1> @vsplat_nxv2i1_2(i1 %x) {
@@ -93,9 +85,7 @@ define <vscale x 4 x i1> @vsplat_nxv4i1_0() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 0, i32 0
-  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x i1> %splat
+  ret <vscale x 4 x i1> splat (i1 0)
 }
 
 define <vscale x 4 x i1> @vsplat_nxv4i1_1() {
@@ -104,9 +94,7 @@ define <vscale x 4 x i1> @vsplat_nxv4i1_1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  ret <vscale x 4 x i1> %splat
+  ret <vscale x 4 x i1> splat (i1 -1)
 }
 
 define <vscale x 4 x i1> @vsplat_nxv4i1_2(i1 %x) {
@@ -128,9 +116,7 @@ define <vscale x 8 x i1> @vsplat_nxv8i1_0() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 0, i32 0
-  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i1> %splat
+  ret <vscale x 8 x i1> splat (i1 0)
 }
 
 define <vscale x 8 x i1> @vsplat_nxv8i1_1() {
@@ -139,9 +125,7 @@ define <vscale x 8 x i1> @vsplat_nxv8i1_1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i1> %splat
+  ret <vscale x 8 x i1> splat (i1 -1)
 }
 
 define <vscale x 8 x i1> @vsplat_nxv8i1_2(i1 %x) {
@@ -163,9 +147,7 @@ define <vscale x 16 x i1> @vsplat_nxv16i1_0() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 0, i32 0
-  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  ret <vscale x 16 x i1> %splat
+  ret <vscale x 16 x i1> splat (i1 0)
 }
 
 define <vscale x 16 x i1> @vsplat_nxv16i1_1() {
@@ -174,9 +156,7 @@ define <vscale x 16 x i1> @vsplat_nxv16i1_1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmset.m v0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  ret <vscale x 16 x i1> %splat
+  ret <vscale x 16 x i1> splat (i1 -1)
 }
 
 define <vscale x 16 x i1> @vsplat_nxv16i1_2(i1 %x) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
index d428fceb69116b..721d6ef26d61e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
@@ -10,9 +10,7 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i64> %splat
+  ret <vscale x 8 x i64> splat (i64 -1)
 }
 
 define <vscale x 8 x i64> @vsplat_nxv8i64_2() {
@@ -21,9 +19,7 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_2() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 4
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i64> %splat
+  ret <vscale x 8 x i64> splat (i64 4)
 }
 
 define <vscale x 8 x i64> @vsplat_nxv8i64_3() {
@@ -33,9 +29,7 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_3() {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vmv.v.x v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 255, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i64> %splat
+  ret <vscale x 8 x i64> splat (i64 255)
 }
 
 define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
@@ -61,9 +55,7 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV64V-NEXT:    vmv.v.x v8, a0
 ; RV64V-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 4211079935, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x i64> %splat
+  ret <vscale x 8 x i64> splat (i64 4211079935)
 }
 
 define <vscale x 8 x i64> @vsplat_nxv8i64_5(i64 %a) {
@@ -95,9 +87,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_6(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, 2
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vret = add <vscale x 8 x i64> %v, %splat
+  %vret = add <vscale x 8 x i64> %v, splat (i64 2)
   ret <vscale x 8 x i64> %vret
 }
 
@@ -107,9 +97,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_7(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vret = add <vscale x 8 x i64> %v, %splat
+  %vret = add <vscale x 8 x i64> %v, splat (i64 -1)
   ret <vscale x 8 x i64> %vret
 }
 
@@ -120,9 +108,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_8(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 255, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vret = add <vscale x 8 x i64> %v, %splat
+  %vret = add <vscale x 8 x i64> %v, splat (i64 255)
   ret <vscale x 8 x i64> %vret
 }
 
@@ -142,9 +128,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_9(<vscale x 8 x i64> %v) {
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV64V-NEXT:    vadd.vx v8, v8, a0
 ; RV64V-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 2063596287, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vret = add <vscale x 8 x i64> %v, %splat
+  %vret = add <vscale x 8 x i64> %v, splat (i64 2063596287)
   ret <vscale x 8 x i64> %vret
 }
 
@@ -172,9 +156,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_10(<vscale x 8 x i64> %v) {
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV64V-NEXT:    vadd.vx v8, v8, a0
 ; RV64V-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 4211079935, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vret = add <vscale x 8 x i64> %v, %splat
+  %vret = add <vscale x 8 x i64> %v, splat (i64 4211079935)
   ret <vscale x 8 x i64> %vret
 }
 
@@ -203,9 +185,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV64V-NEXT:    vadd.vx v8, v8, a0
 ; RV64V-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 8506047231, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vret = add <vscale x 8 x i64> %v, %splat
+  %vret = add <vscale x 8 x i64> %v, splat (i64 8506047231)
   ret <vscale x 8 x i64> %vret
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
index 59be018efb857b..1047860ec8db61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-zfa.ll
@@ -11,9 +11,7 @@ define <vscale x 8 x half> @vsplat_f16_0p625() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x half> poison, half 0.625, i32 0
-  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x half> %splat
+  ret <vscale x 8 x half> splat (half 0.625)
 }
 
 define <vscale x 8 x float> @vsplat_f32_0p75() {
@@ -23,9 +21,7 @@ define <vscale x 8 x float> @vsplat_f32_0p75() {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x float> poison, float 0.75, i32 0
-  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x float> %splat
+  ret <vscale x 8 x float> splat (float 0.75)
 }
 
 define <vscale x 8 x double> @vsplat_f64_neg1() {
@@ -35,7 +31,5 @@ define <vscale x 8 x double> @vsplat_f64_neg1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmv.v.f v8, fa5
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x double> poison, double -1.0, i32 0
-  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  ret <vscale x 8 x double> %splat
+  ret <vscale x 8 x double> splat (double -1.0)
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll
index 738e9cf805b46f..7bae84142d8ae6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll
@@ -45,9 +45,7 @@ define <vscale x 1 x i8> @vsra_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = ashr <vscale x 1 x i8> %va, %splat
+  %vc = ashr <vscale x 1 x i8> %va, splat (i8 6)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -94,9 +92,7 @@ define <vscale x 2 x i8> @vsra_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = ashr <vscale x 2 x i8> %va, %splat
+  %vc = ashr <vscale x 2 x i8> %va, splat (i8 6)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -143,9 +139,7 @@ define <vscale x 4 x i8> @vsra_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = ashr <vscale x 4 x i8> %va, %splat
+  %vc = ashr <vscale x 4 x i8> %va, splat (i8 6)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -192,9 +186,7 @@ define <vscale x 8 x i8> @vsra_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = ashr <vscale x 8 x i8> %va, %splat
+  %vc = ashr <vscale x 8 x i8> %va, splat (i8 6)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -241,9 +233,7 @@ define <vscale x 16 x i8> @vsra_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = ashr <vscale x 16 x i8> %va, %splat
+  %vc = ashr <vscale x 16 x i8> %va, splat (i8 6)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -275,9 +265,7 @@ define <vscale x 32 x i8> @vsra_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = ashr <vscale x 32 x i8> %va, %splat
+  %vc = ashr <vscale x 32 x i8> %va, splat (i8 6)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -309,9 +297,7 @@ define <vscale x 64 x i8> @vsra_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = ashr <vscale x 64 x i8> %va, %splat
+  %vc = ashr <vscale x 64 x i8> %va, splat (i8 6)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -358,9 +344,7 @@ define <vscale x 1 x i16> @vsra_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = ashr <vscale x 1 x i16> %va, %splat
+  %vc = ashr <vscale x 1 x i16> %va, splat (i16 6)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -407,9 +391,7 @@ define <vscale x 2 x i16> @vsra_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = ashr <vscale x 2 x i16> %va, %splat
+  %vc = ashr <vscale x 2 x i16> %va, splat (i16 6)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -456,9 +438,7 @@ define <vscale x 4 x i16> @vsra_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = ashr <vscale x 4 x i16> %va, %splat
+  %vc = ashr <vscale x 4 x i16> %va, splat (i16 6)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -505,9 +485,7 @@ define <vscale x 8 x i16> @vsra_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = ashr <vscale x 8 x i16> %va, %splat
+  %vc = ashr <vscale x 8 x i16> %va, splat (i16 6)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -554,9 +532,7 @@ define <vscale x 16 x i16> @vsra_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = ashr <vscale x 16 x i16> %va, %splat
+  %vc = ashr <vscale x 16 x i16> %va, splat (i16 6)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -588,9 +564,7 @@ define <vscale x 32 x i16> @vsra_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = ashr <vscale x 32 x i16> %va, %splat
+  %vc = ashr <vscale x 32 x i16> %va, splat (i16 6)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -622,9 +596,7 @@ define <vscale x 1 x i32> @vsra_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = ashr <vscale x 1 x i32> %va, %splat
+  %vc = ashr <vscale x 1 x i32> %va, splat (i32 31)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -656,9 +628,7 @@ define <vscale x 2 x i32> @vsra_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = ashr <vscale x 2 x i32> %va, %splat
+  %vc = ashr <vscale x 2 x i32> %va, splat (i32 31)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -690,9 +660,7 @@ define <vscale x 4 x i32> @vsra_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = ashr <vscale x 4 x i32> %va, %splat
+  %vc = ashr <vscale x 4 x i32> %va, splat (i32 31)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -724,9 +692,7 @@ define <vscale x 8 x i32> @vsra_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = ashr <vscale x 8 x i32> %va, %splat
+  %vc = ashr <vscale x 8 x i32> %va, splat (i32 31)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -758,9 +724,7 @@ define <vscale x 16 x i32> @vsra_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = ashr <vscale x 16 x i32> %va, %splat
+  %vc = ashr <vscale x 16 x i32> %va, splat (i32 31)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -792,9 +756,7 @@ define <vscale x 1 x i64> @vsra_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = ashr <vscale x 1 x i64> %va, %splat
+  %vc = ashr <vscale x 1 x i64> %va, splat (i64 31)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -805,9 +767,7 @@ define <vscale x 1 x i64> @vsra_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = ashr <vscale x 1 x i64> %va, %splat
+  %vc = ashr <vscale x 1 x i64> %va, splat (i64 32)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -839,9 +799,7 @@ define <vscale x 2 x i64> @vsra_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = ashr <vscale x 2 x i64> %va, %splat
+  %vc = ashr <vscale x 2 x i64> %va, splat (i64 31)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -852,9 +810,7 @@ define <vscale x 2 x i64> @vsra_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = ashr <vscale x 2 x i64> %va, %splat
+  %vc = ashr <vscale x 2 x i64> %va, splat (i64 32)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -886,9 +842,7 @@ define <vscale x 4 x i64> @vsra_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = ashr <vscale x 4 x i64> %va, %splat
+  %vc = ashr <vscale x 4 x i64> %va, splat (i64 31)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -899,9 +853,7 @@ define <vscale x 4 x i64> @vsra_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = ashr <vscale x 4 x i64> %va, %splat
+  %vc = ashr <vscale x 4 x i64> %va, splat (i64 32)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -933,9 +885,7 @@ define <vscale x 8 x i64> @vsra_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = ashr <vscale x 8 x i64> %va, %splat
+  %vc = ashr <vscale x 8 x i64> %va, splat (i64 31)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -946,9 +896,7 @@ define <vscale x 8 x i64> @vsra_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = ashr <vscale x 8 x i64> %va, %splat
+  %vc = ashr <vscale x 8 x i64> %va, splat (i64 32)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -982,9 +930,7 @@ define <vscale x 8 x i32> @vsra_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 31, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 31), <vscale x 8 x i32> zeroinitializer
   %vc = ashr <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
index 35100d781f059a..632c4db5c5bb57 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
@@ -42,9 +42,7 @@ define <vscale x 1 x i8> @vsra_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -68,9 +66,7 @@ define <vscale x 1 x i8> @vsra_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -80,9 +76,7 @@ define <vscale x 1 x i8> @vsra_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 5), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -92,11 +86,7 @@ define <vscale x 1 x i8> @vsra_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 5), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -118,9 +108,7 @@ define <vscale x 2 x i8> @vsra_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -144,9 +132,7 @@ define <vscale x 2 x i8> @vsra_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -156,9 +142,7 @@ define <vscale x 2 x i8> @vsra_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 5), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -168,11 +152,7 @@ define <vscale x 2 x i8> @vsra_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 5), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -194,9 +174,7 @@ define <vscale x 4 x i8> @vsra_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -220,9 +198,7 @@ define <vscale x 4 x i8> @vsra_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -232,9 +208,7 @@ define <vscale x 4 x i8> @vsra_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 5), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -244,11 +218,7 @@ define <vscale x 4 x i8> @vsra_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 5), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -270,9 +240,7 @@ define <vscale x 8 x i8> @vsra_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -296,9 +264,7 @@ define <vscale x 8 x i8> @vsra_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -308,9 +274,7 @@ define <vscale x 8 x i8> @vsra_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 5), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -320,11 +284,7 @@ define <vscale x 8 x i8> @vsra_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 5), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -346,9 +306,7 @@ define <vscale x 16 x i8> @vsra_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -372,9 +330,7 @@ define <vscale x 16 x i8> @vsra_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -384,9 +340,7 @@ define <vscale x 16 x i8> @vsra_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 5), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -396,11 +350,7 @@ define <vscale x 16 x i8> @vsra_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 5), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -422,9 +372,7 @@ define <vscale x 32 x i8> @vsra_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -448,9 +396,7 @@ define <vscale x 32 x i8> @vsra_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -460,9 +406,7 @@ define <vscale x 32 x i8> @vsra_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 5), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -472,11 +416,7 @@ define <vscale x 32 x i8> @vsra_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 5), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -498,9 +438,7 @@ define <vscale x 64 x i8> @vsra_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -524,9 +462,7 @@ define <vscale x 64 x i8> @vsra_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -536,9 +472,7 @@ define <vscale x 64 x i8> @vsra_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 5), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -548,11 +482,7 @@ define <vscale x 64 x i8> @vsra_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 5, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 5), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -574,9 +504,7 @@ define <vscale x 1 x i16> @vsra_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -600,9 +528,7 @@ define <vscale x 1 x i16> @vsra_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -612,9 +538,7 @@ define <vscale x 1 x i16> @vsra_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 5), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -624,11 +548,7 @@ define <vscale x 1 x i16> @vsra_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 5), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -650,9 +570,7 @@ define <vscale x 2 x i16> @vsra_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -676,9 +594,7 @@ define <vscale x 2 x i16> @vsra_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -688,9 +604,7 @@ define <vscale x 2 x i16> @vsra_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 5), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -700,11 +614,7 @@ define <vscale x 2 x i16> @vsra_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 5), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -726,9 +636,7 @@ define <vscale x 4 x i16> @vsra_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -752,9 +660,7 @@ define <vscale x 4 x i16> @vsra_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -764,9 +670,7 @@ define <vscale x 4 x i16> @vsra_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 5), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -776,11 +680,7 @@ define <vscale x 4 x i16> @vsra_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 5), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -802,9 +702,7 @@ define <vscale x 8 x i16> @vsra_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -828,9 +726,7 @@ define <vscale x 8 x i16> @vsra_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -840,9 +736,7 @@ define <vscale x 8 x i16> @vsra_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 5), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -852,11 +746,7 @@ define <vscale x 8 x i16> @vsra_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 5), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -878,9 +768,7 @@ define <vscale x 16 x i16> @vsra_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -904,9 +792,7 @@ define <vscale x 16 x i16> @vsra_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -916,9 +802,7 @@ define <vscale x 16 x i16> @vsra_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 5), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -928,11 +812,7 @@ define <vscale x 16 x i16> @vsra_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 5), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -954,9 +834,7 @@ define <vscale x 32 x i16> @vsra_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -980,9 +858,7 @@ define <vscale x 32 x i16> @vsra_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -992,9 +868,7 @@ define <vscale x 32 x i16> @vsra_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 5), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1004,11 +878,7 @@ define <vscale x 32 x i16> @vsra_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 5, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 5), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1030,9 +900,7 @@ define <vscale x 1 x i32> @vsra_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1056,9 +924,7 @@ define <vscale x 1 x i32> @vsra_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1068,9 +934,7 @@ define <vscale x 1 x i32> @vsra_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 5), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1080,11 +944,7 @@ define <vscale x 1 x i32> @vsra_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 5), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1106,9 +966,7 @@ define <vscale x 2 x i32> @vsra_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1132,9 +990,7 @@ define <vscale x 2 x i32> @vsra_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1144,9 +1000,7 @@ define <vscale x 2 x i32> @vsra_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 5), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1156,11 +1010,7 @@ define <vscale x 2 x i32> @vsra_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 5), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1182,9 +1032,7 @@ define <vscale x 4 x i32> @vsra_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1208,9 +1056,7 @@ define <vscale x 4 x i32> @vsra_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1220,9 +1066,7 @@ define <vscale x 4 x i32> @vsra_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 5), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1232,11 +1076,7 @@ define <vscale x 4 x i32> @vsra_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 5), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1258,9 +1098,7 @@ define <vscale x 8 x i32> @vsra_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1284,9 +1122,7 @@ define <vscale x 8 x i32> @vsra_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1296,9 +1132,7 @@ define <vscale x 8 x i32> @vsra_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 5), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1308,11 +1142,7 @@ define <vscale x 8 x i32> @vsra_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 5), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1334,9 +1164,7 @@ define <vscale x 16 x i32> @vsra_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1360,9 +1188,7 @@ define <vscale x 16 x i32> @vsra_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1372,9 +1198,7 @@ define <vscale x 16 x i32> @vsra_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 5), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1384,11 +1208,7 @@ define <vscale x 16 x i32> @vsra_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 5, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 5), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1410,9 +1230,7 @@ define <vscale x 1 x i64> @vsra_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1448,9 +1266,7 @@ define <vscale x 1 x i64> @vsra_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1460,9 +1276,7 @@ define <vscale x 1 x i64> @vsra_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 5), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1472,11 +1286,7 @@ define <vscale x 1 x i64> @vsra_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 5), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1498,9 +1308,7 @@ define <vscale x 2 x i64> @vsra_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1536,9 +1344,7 @@ define <vscale x 2 x i64> @vsra_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1548,9 +1354,7 @@ define <vscale x 2 x i64> @vsra_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 5), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1560,11 +1364,7 @@ define <vscale x 2 x i64> @vsra_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 5), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1586,9 +1386,7 @@ define <vscale x 4 x i64> @vsra_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1624,9 +1422,7 @@ define <vscale x 4 x i64> @vsra_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1636,9 +1432,7 @@ define <vscale x 4 x i64> @vsra_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 5), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1648,11 +1442,7 @@ define <vscale x 4 x i64> @vsra_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 5), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1686,9 +1476,7 @@ define <vscale x 8 x i64> @vsra_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1724,9 +1512,7 @@ define <vscale x 8 x i64> @vsra_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1736,9 +1522,7 @@ define <vscale x 8 x i64> @vsra_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 5), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1748,10 +1532,6 @@ define <vscale x 8 x i64> @vsra_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsra.vi v8, v8, 5
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 5), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
index be70b20181b148..6f4538926bef29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
@@ -20,9 +20,7 @@ define <vscale x 1 x i8> @vsrl_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = lshr <vscale x 1 x i8> %va, %splat
+  %vc = lshr <vscale x 1 x i8> %va, splat (i8 6)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -44,9 +42,7 @@ define <vscale x 2 x i8> @vsrl_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = lshr <vscale x 2 x i8> %va, %splat
+  %vc = lshr <vscale x 2 x i8> %va, splat (i8 6)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -68,9 +64,7 @@ define <vscale x 4 x i8> @vsrl_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = lshr <vscale x 4 x i8> %va, %splat
+  %vc = lshr <vscale x 4 x i8> %va, splat (i8 6)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -92,9 +86,7 @@ define <vscale x 8 x i8> @vsrl_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = lshr <vscale x 8 x i8> %va, %splat
+  %vc = lshr <vscale x 8 x i8> %va, splat (i8 6)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -116,9 +108,7 @@ define <vscale x 16 x i8> @vsrl_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = lshr <vscale x 16 x i8> %va, %splat
+  %vc = lshr <vscale x 16 x i8> %va, splat (i8 6)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -140,9 +130,7 @@ define <vscale x 32 x i8> @vsrl_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = lshr <vscale x 32 x i8> %va, %splat
+  %vc = lshr <vscale x 32 x i8> %va, splat (i8 6)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -164,9 +152,7 @@ define <vscale x 64 x i8> @vsrl_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 6, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = lshr <vscale x 64 x i8> %va, %splat
+  %vc = lshr <vscale x 64 x i8> %va, splat (i8 6)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -188,9 +174,7 @@ define <vscale x 1 x i16> @vsrl_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = lshr <vscale x 1 x i16> %va, %splat
+  %vc = lshr <vscale x 1 x i16> %va, splat (i16 6)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -212,9 +196,7 @@ define <vscale x 2 x i16> @vsrl_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = lshr <vscale x 2 x i16> %va, %splat
+  %vc = lshr <vscale x 2 x i16> %va, splat (i16 6)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -236,9 +218,7 @@ define <vscale x 4 x i16> @vsrl_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = lshr <vscale x 4 x i16> %va, %splat
+  %vc = lshr <vscale x 4 x i16> %va, splat (i16 6)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -260,9 +240,7 @@ define <vscale x 8 x i16> @vsrl_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = lshr <vscale x 8 x i16> %va, %splat
+  %vc = lshr <vscale x 8 x i16> %va, splat (i16 6)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -284,9 +262,7 @@ define <vscale x 16 x i16> @vsrl_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = lshr <vscale x 16 x i16> %va, %splat
+  %vc = lshr <vscale x 16 x i16> %va, splat (i16 6)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -308,9 +284,7 @@ define <vscale x 32 x i16> @vsrl_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 6
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 6, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = lshr <vscale x 32 x i16> %va, %splat
+  %vc = lshr <vscale x 32 x i16> %va, splat (i16 6)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -332,9 +306,7 @@ define <vscale x 1 x i32> @vsrl_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = lshr <vscale x 1 x i32> %va, %splat
+  %vc = lshr <vscale x 1 x i32> %va, splat (i32 31)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -356,9 +328,7 @@ define <vscale x 2 x i32> @vsrl_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = lshr <vscale x 2 x i32> %va, %splat
+  %vc = lshr <vscale x 2 x i32> %va, splat (i32 31)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -380,9 +350,7 @@ define <vscale x 4 x i32> @vsrl_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = lshr <vscale x 4 x i32> %va, %splat
+  %vc = lshr <vscale x 4 x i32> %va, splat (i32 31)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -404,9 +372,7 @@ define <vscale x 8 x i32> @vsrl_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = lshr <vscale x 8 x i32> %va, %splat
+  %vc = lshr <vscale x 8 x i32> %va, splat (i32 31)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -428,9 +394,7 @@ define <vscale x 16 x i32> @vsrl_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = lshr <vscale x 16 x i32> %va, %splat
+  %vc = lshr <vscale x 16 x i32> %va, splat (i32 31)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -452,9 +416,7 @@ define <vscale x 1 x i64> @vsrl_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = lshr <vscale x 1 x i64> %va, %splat
+  %vc = lshr <vscale x 1 x i64> %va, splat (i64 31)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -465,9 +427,7 @@ define <vscale x 1 x i64> @vsrl_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = lshr <vscale x 1 x i64> %va, %splat
+  %vc = lshr <vscale x 1 x i64> %va, splat (i64 32)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -489,9 +449,7 @@ define <vscale x 2 x i64> @vsrl_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = lshr <vscale x 2 x i64> %va, %splat
+  %vc = lshr <vscale x 2 x i64> %va, splat (i64 31)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -502,9 +460,7 @@ define <vscale x 2 x i64> @vsrl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = lshr <vscale x 2 x i64> %va, %splat
+  %vc = lshr <vscale x 2 x i64> %va, splat (i64 32)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -526,9 +482,7 @@ define <vscale x 4 x i64> @vsrl_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = lshr <vscale x 4 x i64> %va, %splat
+  %vc = lshr <vscale x 4 x i64> %va, splat (i64 31)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -539,9 +493,7 @@ define <vscale x 4 x i64> @vsrl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = lshr <vscale x 4 x i64> %va, %splat
+  %vc = lshr <vscale x 4 x i64> %va, splat (i64 32)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -563,9 +515,7 @@ define <vscale x 8 x i64> @vsrl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 31, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = lshr <vscale x 8 x i64> %va, %splat
+  %vc = lshr <vscale x 8 x i64> %va, splat (i64 31)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -576,9 +526,7 @@ define <vscale x 8 x i64> @vsrl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 32, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = lshr <vscale x 8 x i64> %va, %splat
+  %vc = lshr <vscale x 8 x i64> %va, splat (i64 32)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -612,9 +560,7 @@ define <vscale x 8 x i32> @vsrl_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 31, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 31, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 31), <vscale x 8 x i32> zeroinitializer
   %vc = lshr <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
index 01d1f14da25276..ec5b7f3faf7ca8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
@@ -41,9 +41,7 @@ define <vscale x 1 x i8> @vsrl_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -67,9 +65,7 @@ define <vscale x 1 x i8> @vsrl_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -79,9 +75,7 @@ define <vscale x 1 x i8> @vsrl_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -91,11 +85,7 @@ define <vscale x 1 x i8> @vsrl_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -117,9 +107,7 @@ define <vscale x 2 x i8> @vsrl_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -143,9 +131,7 @@ define <vscale x 2 x i8> @vsrl_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -155,9 +141,7 @@ define <vscale x 2 x i8> @vsrl_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 4), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -167,11 +151,7 @@ define <vscale x 2 x i8> @vsrl_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -193,9 +173,7 @@ define <vscale x 4 x i8> @vsrl_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -219,9 +197,7 @@ define <vscale x 4 x i8> @vsrl_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -231,9 +207,7 @@ define <vscale x 4 x i8> @vsrl_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 4), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -243,11 +217,7 @@ define <vscale x 4 x i8> @vsrl_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -269,9 +239,7 @@ define <vscale x 8 x i8> @vsrl_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -295,9 +263,7 @@ define <vscale x 8 x i8> @vsrl_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -307,9 +273,7 @@ define <vscale x 8 x i8> @vsrl_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -319,11 +283,7 @@ define <vscale x 8 x i8> @vsrl_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -345,9 +305,7 @@ define <vscale x 16 x i8> @vsrl_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -371,9 +329,7 @@ define <vscale x 16 x i8> @vsrl_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -383,9 +339,7 @@ define <vscale x 16 x i8> @vsrl_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 4), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -395,11 +349,7 @@ define <vscale x 16 x i8> @vsrl_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -421,9 +371,7 @@ define <vscale x 32 x i8> @vsrl_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -447,9 +395,7 @@ define <vscale x 32 x i8> @vsrl_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -459,9 +405,7 @@ define <vscale x 32 x i8> @vsrl_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 4), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -471,11 +415,7 @@ define <vscale x 32 x i8> @vsrl_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 4), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -497,9 +437,7 @@ define <vscale x 64 x i8> @vsrl_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -523,9 +461,7 @@ define <vscale x 64 x i8> @vsrl_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -535,9 +471,7 @@ define <vscale x 64 x i8> @vsrl_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 4), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -547,11 +481,7 @@ define <vscale x 64 x i8> @vsrl_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 4, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 4), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -573,9 +503,7 @@ define <vscale x 1 x i16> @vsrl_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -599,9 +527,7 @@ define <vscale x 1 x i16> @vsrl_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -611,9 +537,7 @@ define <vscale x 1 x i16> @vsrl_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 4), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -623,11 +547,7 @@ define <vscale x 1 x i16> @vsrl_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -649,9 +569,7 @@ define <vscale x 2 x i16> @vsrl_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -675,9 +593,7 @@ define <vscale x 2 x i16> @vsrl_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -687,9 +603,7 @@ define <vscale x 2 x i16> @vsrl_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 4), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -699,11 +613,7 @@ define <vscale x 2 x i16> @vsrl_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -725,9 +635,7 @@ define <vscale x 4 x i16> @vsrl_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -751,9 +659,7 @@ define <vscale x 4 x i16> @vsrl_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -763,9 +669,7 @@ define <vscale x 4 x i16> @vsrl_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 4), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -775,11 +679,7 @@ define <vscale x 4 x i16> @vsrl_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -801,9 +701,7 @@ define <vscale x 8 x i16> @vsrl_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -827,9 +725,7 @@ define <vscale x 8 x i16> @vsrl_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -839,9 +735,7 @@ define <vscale x 8 x i16> @vsrl_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 4), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -851,11 +745,7 @@ define <vscale x 8 x i16> @vsrl_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -877,9 +767,7 @@ define <vscale x 16 x i16> @vsrl_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -903,9 +791,7 @@ define <vscale x 16 x i16> @vsrl_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -915,9 +801,7 @@ define <vscale x 16 x i16> @vsrl_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 4), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -927,11 +811,7 @@ define <vscale x 16 x i16> @vsrl_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -953,9 +833,7 @@ define <vscale x 32 x i16> @vsrl_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -979,9 +857,7 @@ define <vscale x 32 x i16> @vsrl_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -991,9 +867,7 @@ define <vscale x 32 x i16> @vsrl_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 4), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1003,11 +877,7 @@ define <vscale x 32 x i16> @vsrl_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 4, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 4), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1029,9 +899,7 @@ define <vscale x 1 x i32> @vsrl_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1055,9 +923,7 @@ define <vscale x 1 x i32> @vsrl_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1067,9 +933,7 @@ define <vscale x 1 x i32> @vsrl_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1079,11 +943,7 @@ define <vscale x 1 x i32> @vsrl_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1105,9 +965,7 @@ define <vscale x 2 x i32> @vsrl_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1131,9 +989,7 @@ define <vscale x 2 x i32> @vsrl_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1143,9 +999,7 @@ define <vscale x 2 x i32> @vsrl_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 4), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1155,11 +1009,7 @@ define <vscale x 2 x i32> @vsrl_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1181,9 +1031,7 @@ define <vscale x 4 x i32> @vsrl_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1207,9 +1055,7 @@ define <vscale x 4 x i32> @vsrl_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1219,9 +1065,7 @@ define <vscale x 4 x i32> @vsrl_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 4), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1231,11 +1075,7 @@ define <vscale x 4 x i32> @vsrl_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1257,9 +1097,7 @@ define <vscale x 8 x i32> @vsrl_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1283,9 +1121,7 @@ define <vscale x 8 x i32> @vsrl_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1295,9 +1131,7 @@ define <vscale x 8 x i32> @vsrl_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1307,11 +1141,7 @@ define <vscale x 8 x i32> @vsrl_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1333,9 +1163,7 @@ define <vscale x 16 x i32> @vsrl_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1359,9 +1187,7 @@ define <vscale x 16 x i32> @vsrl_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1371,9 +1197,7 @@ define <vscale x 16 x i32> @vsrl_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 4), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1383,11 +1207,7 @@ define <vscale x 16 x i32> @vsrl_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 4, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1409,9 +1229,7 @@ define <vscale x 1 x i64> @vsrl_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1447,9 +1265,7 @@ define <vscale x 1 x i64> @vsrl_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1459,9 +1275,7 @@ define <vscale x 1 x i64> @vsrl_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1471,11 +1285,7 @@ define <vscale x 1 x i64> @vsrl_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1497,9 +1307,7 @@ define <vscale x 2 x i64> @vsrl_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1535,9 +1343,7 @@ define <vscale x 2 x i64> @vsrl_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1547,9 +1353,7 @@ define <vscale x 2 x i64> @vsrl_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 4), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1559,11 +1363,7 @@ define <vscale x 2 x i64> @vsrl_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1585,9 +1385,7 @@ define <vscale x 4 x i64> @vsrl_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1623,9 +1421,7 @@ define <vscale x 4 x i64> @vsrl_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1635,9 +1431,7 @@ define <vscale x 4 x i64> @vsrl_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 4), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1647,11 +1441,7 @@ define <vscale x 4 x i64> @vsrl_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1685,9 +1475,7 @@ define <vscale x 8 x i64> @vsrl_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1723,9 +1511,7 @@ define <vscale x 8 x i64> @vsrl_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1735,9 +1521,7 @@ define <vscale x 8 x i64> @vsrl_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1747,10 +1531,6 @@ define <vscale x 8 x i64> @vsrl_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vi v8, v8, 4
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
index d3289ba6e068a0..c043858c029473 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
@@ -35,9 +35,7 @@ define <vscale x 1 x i8> @ssub_nxv1i8_vi(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
+  %v = call <vscale x 1 x i8> @llvm.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 1))
   ret <vscale x 1 x i8> %v
 }
 
@@ -72,9 +70,7 @@ define <vscale x 2 x i8> @ssub_nxv2i8_vi(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
+  %v = call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 1))
   ret <vscale x 2 x i8> %v
 }
 
@@ -109,9 +105,7 @@ define <vscale x 4 x i8> @ssub_nxv4i8_vi(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
+  %v = call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 1))
   ret <vscale x 4 x i8> %v
 }
 
@@ -146,9 +140,7 @@ define <vscale x 8 x i8> @ssub_nxv8i8_vi(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
+  %v = call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 1))
   ret <vscale x 8 x i8> %v
 }
 
@@ -183,9 +175,7 @@ define <vscale x 16 x i8> @ssub_nxv16i8_vi(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
+  %v = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 1))
   ret <vscale x 16 x i8> %v
 }
 
@@ -220,9 +210,7 @@ define <vscale x 32 x i8> @ssub_nxv32i8_vi(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
+  %v = call <vscale x 32 x i8> @llvm.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 1))
   ret <vscale x 32 x i8> %v
 }
 
@@ -257,9 +245,7 @@ define <vscale x 64 x i8> @ssub_nxv64i8_vi(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
+  %v = call <vscale x 64 x i8> @llvm.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 1))
   ret <vscale x 64 x i8> %v
 }
 
@@ -294,9 +280,7 @@ define <vscale x 1 x i16> @ssub_nxv1i16_vi(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
+  %v = call <vscale x 1 x i16> @llvm.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 1))
   ret <vscale x 1 x i16> %v
 }
 
@@ -331,9 +315,7 @@ define <vscale x 2 x i16> @ssub_nxv2i16_vi(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
+  %v = call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 1))
   ret <vscale x 2 x i16> %v
 }
 
@@ -368,9 +350,7 @@ define <vscale x 4 x i16> @ssub_nxv4i16_vi(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
+  %v = call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 1))
   ret <vscale x 4 x i16> %v
 }
 
@@ -405,9 +385,7 @@ define <vscale x 8 x i16> @ssub_nxv8i16_vi(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
+  %v = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 1))
   ret <vscale x 8 x i16> %v
 }
 
@@ -442,9 +420,7 @@ define <vscale x 16 x i16> @ssub_nxv16i16_vi(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
+  %v = call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 1))
   ret <vscale x 16 x i16> %v
 }
 
@@ -479,9 +455,7 @@ define <vscale x 32 x i16> @ssub_nxv32i16_vi(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
+  %v = call <vscale x 32 x i16> @llvm.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 1))
   ret <vscale x 32 x i16> %v
 }
 
@@ -516,9 +490,7 @@ define <vscale x 1 x i32> @ssub_nxv1i32_vi(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
+  %v = call <vscale x 1 x i32> @llvm.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 1))
   ret <vscale x 1 x i32> %v
 }
 
@@ -553,9 +525,7 @@ define <vscale x 2 x i32> @ssub_nxv2i32_vi(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
+  %v = call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 1))
   ret <vscale x 2 x i32> %v
 }
 
@@ -590,9 +560,7 @@ define <vscale x 4 x i32> @ssub_nxv4i32_vi(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
+  %v = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 1))
   ret <vscale x 4 x i32> %v
 }
 
@@ -627,9 +595,7 @@ define <vscale x 8 x i32> @ssub_nxv8i32_vi(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
+  %v = call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 1))
   ret <vscale x 8 x i32> %v
 }
 
@@ -664,9 +630,7 @@ define <vscale x 16 x i32> @ssub_nxv16i32_vi(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
+  %v = call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 1))
   ret <vscale x 16 x i32> %v
 }
 
@@ -714,9 +678,7 @@ define <vscale x 1 x i64> @ssub_nxv1i64_vi(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
+  %v = call <vscale x 1 x i64> @llvm.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 1))
   ret <vscale x 1 x i64> %v
 }
 
@@ -764,9 +726,7 @@ define <vscale x 2 x i64> @ssub_nxv2i64_vi(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
+  %v = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 1))
   ret <vscale x 2 x i64> %v
 }
 
@@ -814,9 +774,7 @@ define <vscale x 4 x i64> @ssub_nxv4i64_vi(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
+  %v = call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 1))
   ret <vscale x 4 x i64> %v
 }
 
@@ -864,8 +822,6 @@ define <vscale x 8 x i64> @ssub_nxv8i64_vi(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
+  %v = call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 1))
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index 2d51a2ee44f658..b56a0f40176cf3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -43,9 +43,7 @@ define <vscale x 1 x i8> @vssub_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -83,9 +81,7 @@ define <vscale x 1 x i8> @vssub_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -96,9 +92,7 @@ define <vscale x 1 x i8> @vssub_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -109,11 +103,7 @@ define <vscale x 1 x i8> @vssub_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -135,9 +125,7 @@ define <vscale x 2 x i8> @vssub_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -161,9 +149,7 @@ define <vscale x 2 x i8> @vssub_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -174,9 +160,7 @@ define <vscale x 2 x i8> @vssub_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -187,11 +171,7 @@ define <vscale x 2 x i8> @vssub_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -213,9 +193,7 @@ define <vscale x 3 x i8> @vssub_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -239,9 +217,7 @@ define <vscale x 3 x i8> @vssub_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -252,9 +228,7 @@ define <vscale x 3 x i8> @vssub_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> %m, i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -265,11 +239,7 @@ define <vscale x 3 x i8> @vssub_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -291,9 +261,7 @@ define <vscale x 4 x i8> @vssub_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -317,9 +285,7 @@ define <vscale x 4 x i8> @vssub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -330,9 +296,7 @@ define <vscale x 4 x i8> @vssub_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -343,11 +307,7 @@ define <vscale x 4 x i8> @vssub_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -369,9 +329,7 @@ define <vscale x 8 x i8> @vssub_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -395,9 +353,7 @@ define <vscale x 8 x i8> @vssub_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -408,9 +364,7 @@ define <vscale x 8 x i8> @vssub_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -421,11 +375,7 @@ define <vscale x 8 x i8> @vssub_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -447,9 +397,7 @@ define <vscale x 16 x i8> @vssub_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -473,9 +421,7 @@ define <vscale x 16 x i8> @vssub_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -486,9 +432,7 @@ define <vscale x 16 x i8> @vssub_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -499,11 +443,7 @@ define <vscale x 16 x i8> @vssub_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -525,9 +465,7 @@ define <vscale x 32 x i8> @vssub_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -551,9 +489,7 @@ define <vscale x 32 x i8> @vssub_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -564,9 +500,7 @@ define <vscale x 32 x i8> @vssub_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -577,11 +511,7 @@ define <vscale x 32 x i8> @vssub_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -603,9 +533,7 @@ define <vscale x 64 x i8> @vssub_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -629,9 +557,7 @@ define <vscale x 64 x i8> @vssub_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -642,9 +568,7 @@ define <vscale x 64 x i8> @vssub_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -655,11 +579,7 @@ define <vscale x 64 x i8> @vssub_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -690,9 +610,7 @@ define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -715,11 +633,7 @@ define <vscale x 128 x i8> @vssub_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -741,9 +655,7 @@ define <vscale x 1 x i16> @vssub_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -767,9 +679,7 @@ define <vscale x 1 x i16> @vssub_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -780,9 +690,7 @@ define <vscale x 1 x i16> @vssub_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -793,11 +701,7 @@ define <vscale x 1 x i16> @vssub_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -819,9 +723,7 @@ define <vscale x 2 x i16> @vssub_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -845,9 +747,7 @@ define <vscale x 2 x i16> @vssub_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -858,9 +758,7 @@ define <vscale x 2 x i16> @vssub_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -871,11 +769,7 @@ define <vscale x 2 x i16> @vssub_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -897,9 +791,7 @@ define <vscale x 4 x i16> @vssub_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -923,9 +815,7 @@ define <vscale x 4 x i16> @vssub_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -936,9 +826,7 @@ define <vscale x 4 x i16> @vssub_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -949,11 +837,7 @@ define <vscale x 4 x i16> @vssub_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -975,9 +859,7 @@ define <vscale x 8 x i16> @vssub_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1001,9 +883,7 @@ define <vscale x 8 x i16> @vssub_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1014,9 +894,7 @@ define <vscale x 8 x i16> @vssub_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1027,11 +905,7 @@ define <vscale x 8 x i16> @vssub_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1053,9 +927,7 @@ define <vscale x 16 x i16> @vssub_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1079,9 +951,7 @@ define <vscale x 16 x i16> @vssub_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1092,9 +962,7 @@ define <vscale x 16 x i16> @vssub_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1105,11 +973,7 @@ define <vscale x 16 x i16> @vssub_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1131,9 +995,7 @@ define <vscale x 32 x i16> @vssub_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1157,9 +1019,7 @@ define <vscale x 32 x i16> @vssub_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1170,9 +1030,7 @@ define <vscale x 32 x i16> @vssub_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1183,11 +1041,7 @@ define <vscale x 32 x i16> @vssub_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1209,9 +1063,7 @@ define <vscale x 1 x i32> @vssub_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1235,9 +1087,7 @@ define <vscale x 1 x i32> @vssub_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1248,9 +1098,7 @@ define <vscale x 1 x i32> @vssub_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1261,11 +1109,7 @@ define <vscale x 1 x i32> @vssub_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1287,9 +1131,7 @@ define <vscale x 2 x i32> @vssub_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1313,9 +1155,7 @@ define <vscale x 2 x i32> @vssub_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1326,9 +1166,7 @@ define <vscale x 2 x i32> @vssub_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1339,11 +1177,7 @@ define <vscale x 2 x i32> @vssub_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1365,9 +1199,7 @@ define <vscale x 4 x i32> @vssub_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1391,9 +1223,7 @@ define <vscale x 4 x i32> @vssub_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1404,9 +1234,7 @@ define <vscale x 4 x i32> @vssub_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1417,11 +1245,7 @@ define <vscale x 4 x i32> @vssub_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1443,9 +1267,7 @@ define <vscale x 8 x i32> @vssub_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1469,9 +1291,7 @@ define <vscale x 8 x i32> @vssub_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1482,9 +1302,7 @@ define <vscale x 8 x i32> @vssub_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1495,11 +1313,7 @@ define <vscale x 8 x i32> @vssub_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1521,9 +1335,7 @@ define <vscale x 16 x i32> @vssub_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1547,9 +1359,7 @@ define <vscale x 16 x i32> @vssub_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1560,9 +1370,7 @@ define <vscale x 16 x i32> @vssub_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1573,11 +1381,7 @@ define <vscale x 16 x i32> @vssub_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1609,9 +1413,7 @@ define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1634,11 +1436,7 @@ define <vscale x 32 x i32> @vssub_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1660,9 +1458,7 @@ define <vscale x 1 x i64> @vssub_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1714,9 +1510,7 @@ define <vscale x 1 x i64> @vssub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1727,9 +1521,7 @@ define <vscale x 1 x i64> @vssub_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1740,11 +1532,7 @@ define <vscale x 1 x i64> @vssub_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1766,9 +1554,7 @@ define <vscale x 2 x i64> @vssub_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1820,9 +1606,7 @@ define <vscale x 2 x i64> @vssub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1833,9 +1617,7 @@ define <vscale x 2 x i64> @vssub_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1846,11 +1628,7 @@ define <vscale x 2 x i64> @vssub_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1872,9 +1650,7 @@ define <vscale x 4 x i64> @vssub_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1926,9 +1702,7 @@ define <vscale x 4 x i64> @vssub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1939,9 +1713,7 @@ define <vscale x 4 x i64> @vssub_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1952,11 +1724,7 @@ define <vscale x 4 x i64> @vssub_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1978,9 +1746,7 @@ define <vscale x 8 x i64> @vssub_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2032,9 +1798,7 @@ define <vscale x 8 x i64> @vssub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2045,9 +1809,7 @@ define <vscale x 8 x i64> @vssub_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2058,10 +1820,6 @@ define <vscale x 8 x i64> @vssub_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
index a5b481917caefc..5349548a213bc7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
@@ -35,9 +35,7 @@ define <vscale x 1 x i8> @usub_nxv1i8_vi(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb)
+  %v = call <vscale x 1 x i8> @llvm.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 2))
   ret <vscale x 1 x i8> %v
 }
 
@@ -72,9 +70,7 @@ define <vscale x 2 x i8> @usub_nxv2i8_vi(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb)
+  %v = call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 2))
   ret <vscale x 2 x i8> %v
 }
 
@@ -109,9 +105,7 @@ define <vscale x 4 x i8> @usub_nxv4i8_vi(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb)
+  %v = call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 2))
   ret <vscale x 4 x i8> %v
 }
 
@@ -146,9 +140,7 @@ define <vscale x 8 x i8> @usub_nxv8i8_vi(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb)
+  %v = call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 2))
   ret <vscale x 8 x i8> %v
 }
 
@@ -183,9 +175,7 @@ define <vscale x 16 x i8> @usub_nxv16i8_vi(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb)
+  %v = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 2))
   ret <vscale x 16 x i8> %v
 }
 
@@ -220,9 +210,7 @@ define <vscale x 32 x i8> @usub_nxv32i8_vi(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb)
+  %v = call <vscale x 32 x i8> @llvm.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 2))
   ret <vscale x 32 x i8> %v
 }
 
@@ -257,9 +245,7 @@ define <vscale x 64 x i8> @usub_nxv64i8_vi(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 2, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb)
+  %v = call <vscale x 64 x i8> @llvm.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 2))
   ret <vscale x 64 x i8> %v
 }
 
@@ -294,9 +280,7 @@ define <vscale x 1 x i16> @usub_nxv1i16_vi(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb)
+  %v = call <vscale x 1 x i16> @llvm.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 2))
   ret <vscale x 1 x i16> %v
 }
 
@@ -331,9 +315,7 @@ define <vscale x 2 x i16> @usub_nxv2i16_vi(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb)
+  %v = call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 2))
   ret <vscale x 2 x i16> %v
 }
 
@@ -368,9 +350,7 @@ define <vscale x 4 x i16> @usub_nxv4i16_vi(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb)
+  %v = call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 2))
   ret <vscale x 4 x i16> %v
 }
 
@@ -405,9 +385,7 @@ define <vscale x 8 x i16> @usub_nxv8i16_vi(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb)
+  %v = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 2))
   ret <vscale x 8 x i16> %v
 }
 
@@ -442,9 +420,7 @@ define <vscale x 16 x i16> @usub_nxv16i16_vi(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb)
+  %v = call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 2))
   ret <vscale x 16 x i16> %v
 }
 
@@ -479,9 +455,7 @@ define <vscale x 32 x i16> @usub_nxv32i16_vi(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 2, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb)
+  %v = call <vscale x 32 x i16> @llvm.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 2))
   ret <vscale x 32 x i16> %v
 }
 
@@ -516,9 +490,7 @@ define <vscale x 1 x i32> @usub_nxv1i32_vi(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb)
+  %v = call <vscale x 1 x i32> @llvm.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 2))
   ret <vscale x 1 x i32> %v
 }
 
@@ -553,9 +525,7 @@ define <vscale x 2 x i32> @usub_nxv2i32_vi(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb)
+  %v = call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 2))
   ret <vscale x 2 x i32> %v
 }
 
@@ -590,9 +560,7 @@ define <vscale x 4 x i32> @usub_nxv4i32_vi(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb)
+  %v = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 2))
   ret <vscale x 4 x i32> %v
 }
 
@@ -627,9 +595,7 @@ define <vscale x 8 x i32> @usub_nxv8i32_vi(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb)
+  %v = call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 2))
   ret <vscale x 8 x i32> %v
 }
 
@@ -664,9 +630,7 @@ define <vscale x 16 x i32> @usub_nxv16i32_vi(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 2, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb)
+  %v = call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 2))
   ret <vscale x 16 x i32> %v
 }
 
@@ -714,9 +678,7 @@ define <vscale x 1 x i64> @usub_nxv1i64_vi(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb)
+  %v = call <vscale x 1 x i64> @llvm.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 2))
   ret <vscale x 1 x i64> %v
 }
 
@@ -764,9 +726,7 @@ define <vscale x 2 x i64> @usub_nxv2i64_vi(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb)
+  %v = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 2))
   ret <vscale x 2 x i64> %v
 }
 
@@ -814,9 +774,7 @@ define <vscale x 4 x i64> @usub_nxv4i64_vi(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb)
+  %v = call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 2))
   ret <vscale x 4 x i64> %v
 }
 
@@ -864,8 +822,6 @@ define <vscale x 8 x i64> @usub_nxv8i64_vi(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb)
+  %v = call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 2))
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index e5589ce1a9bc6e..8275c3081c7c16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -41,9 +41,7 @@ define <vscale x 1 x i8> @vssubu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -81,9 +79,7 @@ define <vscale x 1 x i8> @vssubu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -94,9 +90,7 @@ define <vscale x 1 x i8> @vssubu_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -107,11 +101,7 @@ define <vscale x 1 x i8> @vssubu_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -133,9 +123,7 @@ define <vscale x 2 x i8> @vssubu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -159,9 +147,7 @@ define <vscale x 2 x i8> @vssubu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -172,9 +158,7 @@ define <vscale x 2 x i8> @vssubu_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -185,11 +169,7 @@ define <vscale x 2 x i8> @vssubu_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -211,9 +191,7 @@ define <vscale x 3 x i8> @vssubu_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -237,9 +215,7 @@ define <vscale x 3 x i8> @vssubu_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -250,9 +226,7 @@ define <vscale x 3 x i8> @vssubu_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> %m, i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -263,11 +237,7 @@ define <vscale x 3 x i8> @vssubu_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
-  %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
-  %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 3 x i8> %v
 }
 
@@ -289,9 +259,7 @@ define <vscale x 4 x i8> @vssubu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -315,9 +283,7 @@ define <vscale x 4 x i8> @vssubu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -328,9 +294,7 @@ define <vscale x 4 x i8> @vssubu_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -341,11 +305,7 @@ define <vscale x 4 x i8> @vssubu_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -367,9 +327,7 @@ define <vscale x 8 x i8> @vssubu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -393,9 +351,7 @@ define <vscale x 8 x i8> @vssubu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -406,9 +362,7 @@ define <vscale x 8 x i8> @vssubu_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -419,11 +373,7 @@ define <vscale x 8 x i8> @vssubu_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -445,9 +395,7 @@ define <vscale x 16 x i8> @vssubu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -471,9 +419,7 @@ define <vscale x 16 x i8> @vssubu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -484,9 +430,7 @@ define <vscale x 16 x i8> @vssubu_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -497,11 +441,7 @@ define <vscale x 16 x i8> @vssubu_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -523,9 +463,7 @@ define <vscale x 32 x i8> @vssubu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -549,9 +487,7 @@ define <vscale x 32 x i8> @vssubu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -562,9 +498,7 @@ define <vscale x 32 x i8> @vssubu_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -575,11 +509,7 @@ define <vscale x 32 x i8> @vssubu_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -601,9 +531,7 @@ define <vscale x 64 x i8> @vssubu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -627,9 +555,7 @@ define <vscale x 64 x i8> @vssubu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -640,9 +566,7 @@ define <vscale x 64 x i8> @vssubu_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 6
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -653,11 +577,7 @@ define <vscale x 64 x i8> @vssubu_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -688,9 +608,7 @@ define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -713,11 +631,7 @@ define <vscale x 128 x i8> @vssubu_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
-  %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
-  %v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+  %v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 128 x i8> %v
 }
 
@@ -739,9 +653,7 @@ define <vscale x 1 x i16> @vssubu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -765,9 +677,7 @@ define <vscale x 1 x i16> @vssubu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -778,9 +688,7 @@ define <vscale x 1 x i16> @vssubu_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -791,11 +699,7 @@ define <vscale x 1 x i16> @vssubu_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -817,9 +721,7 @@ define <vscale x 2 x i16> @vssubu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -843,9 +745,7 @@ define <vscale x 2 x i16> @vssubu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -856,9 +756,7 @@ define <vscale x 2 x i16> @vssubu_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -869,11 +767,7 @@ define <vscale x 2 x i16> @vssubu_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -895,9 +789,7 @@ define <vscale x 4 x i16> @vssubu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -921,9 +813,7 @@ define <vscale x 4 x i16> @vssubu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -934,9 +824,7 @@ define <vscale x 4 x i16> @vssubu_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -947,11 +835,7 @@ define <vscale x 4 x i16> @vssubu_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -973,9 +857,7 @@ define <vscale x 8 x i16> @vssubu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -999,9 +881,7 @@ define <vscale x 8 x i16> @vssubu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i1
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1012,9 +892,7 @@ define <vscale x 8 x i16> @vssubu_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1025,11 +903,7 @@ define <vscale x 8 x i16> @vssubu_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1051,9 +925,7 @@ define <vscale x 16 x i16> @vssubu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1077,9 +949,7 @@ define <vscale x 16 x i16> @vssubu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1090,9 +960,7 @@ define <vscale x 16 x i16> @vssubu_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1103,11 +971,7 @@ define <vscale x 16 x i16> @vssubu_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1129,9 +993,7 @@ define <vscale x 32 x i16> @vssubu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1155,9 +1017,7 @@ define <vscale x 32 x i16> @vssubu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1168,9 +1028,7 @@ define <vscale x 32 x i16> @vssubu_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1181,11 +1039,7 @@ define <vscale x 32 x i16> @vssubu_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1207,9 +1061,7 @@ define <vscale x 1 x i32> @vssubu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1233,9 +1085,7 @@ define <vscale x 1 x i32> @vssubu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1246,9 +1096,7 @@ define <vscale x 1 x i32> @vssubu_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1259,11 +1107,7 @@ define <vscale x 1 x i32> @vssubu_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1285,9 +1129,7 @@ define <vscale x 2 x i32> @vssubu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1311,9 +1153,7 @@ define <vscale x 2 x i32> @vssubu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1324,9 +1164,7 @@ define <vscale x 2 x i32> @vssubu_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1337,11 +1175,7 @@ define <vscale x 2 x i32> @vssubu_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1363,9 +1197,7 @@ define <vscale x 4 x i32> @vssubu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1389,9 +1221,7 @@ define <vscale x 4 x i32> @vssubu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1402,9 +1232,7 @@ define <vscale x 4 x i32> @vssubu_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1415,11 +1243,7 @@ define <vscale x 4 x i32> @vssubu_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1441,9 +1265,7 @@ define <vscale x 8 x i32> @vssubu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1467,9 +1289,7 @@ define <vscale x 8 x i32> @vssubu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i3
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1480,9 +1300,7 @@ define <vscale x 8 x i32> @vssubu_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1493,11 +1311,7 @@ define <vscale x 8 x i32> @vssubu_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1519,9 +1333,7 @@ define <vscale x 16 x i32> @vssubu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1545,9 +1357,7 @@ define <vscale x 16 x i32> @vssubu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1558,9 +1368,7 @@ define <vscale x 16 x i32> @vssubu_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1571,11 +1379,7 @@ define <vscale x 16 x i32> @vssubu_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1607,9 +1411,7 @@ define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1632,11 +1434,7 @@ define <vscale x 32 x i32> @vssubu_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a2
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i32> %v
 }
 
@@ -1658,9 +1456,7 @@ define <vscale x 1 x i64> @vssubu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1712,9 +1508,7 @@ define <vscale x 1 x i64> @vssubu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i6
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1725,9 +1519,7 @@ define <vscale x 1 x i64> @vssubu_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1738,11 +1530,7 @@ define <vscale x 1 x i64> @vssubu_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1764,9 +1552,7 @@ define <vscale x 2 x i64> @vssubu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1818,9 +1604,7 @@ define <vscale x 2 x i64> @vssubu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i6
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1831,9 +1615,7 @@ define <vscale x 2 x i64> @vssubu_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1844,11 +1626,7 @@ define <vscale x 2 x i64> @vssubu_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1870,9 +1648,7 @@ define <vscale x 4 x i64> @vssubu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1924,9 +1700,7 @@ define <vscale x 4 x i64> @vssubu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i6
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1937,9 +1711,7 @@ define <vscale x 4 x i64> @vssubu_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1950,11 +1722,7 @@ define <vscale x 4 x i64> @vssubu_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1976,9 +1744,7 @@ define <vscale x 8 x i64> @vssubu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2030,9 +1796,7 @@ define <vscale x 8 x i64> @vssubu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i6
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2043,9 +1807,7 @@ define <vscale x 8 x i64> @vssubu_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2056,10 +1818,6 @@ define <vscale x 8 x i64> @vssubu_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
index 5ab028ffc3deca..b7f404c8e5ac92 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
@@ -31,9 +31,7 @@ define <vscale x 1 x i8> @vsub_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i8> %va, %splat
+  %vc = sub <vscale x 1 x i8> %va, splat (i8 1)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -44,11 +42,7 @@ define <vscale x 1 x i8> @vsub_ii_nxv1i8_1() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    ret
-  %heada = insertelement <vscale x 1 x i8> poison, i8 2, i32 0
-  %splata = shufflevector <vscale x 1 x i8> %heada, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %headb = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
-  %splatb = shufflevector <vscale x 1 x i8> %headb, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i8> %splata, %splatb
+  %vc = sub <vscale x 1 x i8> splat (i8 2), splat (i8 3)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -81,9 +75,7 @@ define <vscale x 2 x i8> @vsub_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sub <vscale x 2 x i8> %va, %splat
+  %vc = sub <vscale x 2 x i8> %va, splat (i8 1)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -116,9 +108,7 @@ define <vscale x 4 x i8> @vsub_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sub <vscale x 4 x i8> %va, %splat
+  %vc = sub <vscale x 4 x i8> %va, splat (i8 1)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -151,9 +141,7 @@ define <vscale x 8 x i8> @vsub_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sub <vscale x 8 x i8> %va, %splat
+  %vc = sub <vscale x 8 x i8> %va, splat (i8 1)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -186,9 +174,7 @@ define <vscale x 16 x i8> @vsub_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sub <vscale x 16 x i8> %va, %splat
+  %vc = sub <vscale x 16 x i8> %va, splat (i8 1)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -221,9 +207,7 @@ define <vscale x 32 x i8> @vsub_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = sub <vscale x 32 x i8> %va, %splat
+  %vc = sub <vscale x 32 x i8> %va, splat (i8 1)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -256,9 +240,7 @@ define <vscale x 64 x i8> @vsub_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 1, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = sub <vscale x 64 x i8> %va, %splat
+  %vc = sub <vscale x 64 x i8> %va, splat (i8 1)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -291,9 +273,7 @@ define <vscale x 1 x i16> @vsub_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i16> %va, %splat
+  %vc = sub <vscale x 1 x i16> %va, splat (i16 1)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -326,9 +306,7 @@ define <vscale x 2 x i16> @vsub_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sub <vscale x 2 x i16> %va, %splat
+  %vc = sub <vscale x 2 x i16> %va, splat (i16 1)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -361,9 +339,7 @@ define <vscale x 4 x i16> @vsub_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sub <vscale x 4 x i16> %va, %splat
+  %vc = sub <vscale x 4 x i16> %va, splat (i16 1)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -396,9 +372,7 @@ define <vscale x 8 x i16> @vsub_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sub <vscale x 8 x i16> %va, %splat
+  %vc = sub <vscale x 8 x i16> %va, splat (i16 1)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -431,9 +405,7 @@ define <vscale x 16 x i16> @vsub_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sub <vscale x 16 x i16> %va, %splat
+  %vc = sub <vscale x 16 x i16> %va, splat (i16 1)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -466,9 +438,7 @@ define <vscale x 32 x i16> @vsub_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 1, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = sub <vscale x 32 x i16> %va, %splat
+  %vc = sub <vscale x 32 x i16> %va, splat (i16 1)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -501,9 +471,7 @@ define <vscale x 1 x i32> @vsub_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i32> %va, %splat
+  %vc = sub <vscale x 1 x i32> %va, splat (i32 1)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -536,9 +504,7 @@ define <vscale x 2 x i32> @vsub_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sub <vscale x 2 x i32> %va, %splat
+  %vc = sub <vscale x 2 x i32> %va, splat (i32 1)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -571,9 +537,7 @@ define <vscale x 4 x i32> @vsub_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sub <vscale x 4 x i32> %va, %splat
+  %vc = sub <vscale x 4 x i32> %va, splat (i32 1)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -606,9 +570,7 @@ define <vscale x 8 x i32> @vsub_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sub <vscale x 8 x i32> %va, %splat
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 1)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -641,9 +603,7 @@ define <vscale x 16 x i32> @vsub_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 1, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = sub <vscale x 16 x i32> %va, %splat
+  %vc = sub <vscale x 16 x i32> %va, splat (i32 1)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -689,9 +649,7 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = sub <vscale x 1 x i64> %va, %splat
+  %vc = sub <vscale x 1 x i64> %va, splat (i64 1)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -737,9 +695,7 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = sub <vscale x 2 x i64> %va, %splat
+  %vc = sub <vscale x 2 x i64> %va, splat (i64 1)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -785,9 +741,7 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = sub <vscale x 4 x i64> %va, %splat
+  %vc = sub <vscale x 4 x i64> %va, splat (i64 1)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -833,9 +787,7 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = sub <vscale x 8 x i64> %va, %splat
+  %vc = sub <vscale x 8 x i64> %va, splat (i64 1)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -902,9 +854,7 @@ define <vscale x 8 x i32> @vsub_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> zeroinitializer
   %vc = sub <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
index f424c6c3aed943..a2b9285fedeaf7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
@@ -36,9 +36,7 @@ define <vscale x 1 x i8> @vsub_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -62,9 +60,7 @@ define <vscale x 1 x i8> @vsub_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -86,9 +82,7 @@ define <vscale x 2 x i8> @vsub_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -112,9 +106,7 @@ define <vscale x 2 x i8> @vsub_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -136,9 +128,7 @@ define <vscale x 4 x i8> @vsub_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -162,9 +152,7 @@ define <vscale x 4 x i8> @vsub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -186,9 +174,7 @@ define <vscale x 5 x i8> @vsub_vv_nxv5i8_unmasked(<vscale x 5 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 5 x i1> %head, <vscale x 5 x i1> poison, <vscale x 5 x i32> zeroinitializer
-  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
+  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 5 x i8> %v
 }
 
@@ -212,9 +198,7 @@ define <vscale x 5 x i8> @vsub_vx_nxv5i8_unmasked(<vscale x 5 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 5 x i8> %elt.head, <vscale x 5 x i8> poison, <vscale x 5 x i32> zeroinitializer
-  %head = insertelement <vscale x 5 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 5 x i1> %head, <vscale x 5 x i1> poison, <vscale x 5 x i32> zeroinitializer
-  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %vb, <vscale x 5 x i1> %m, i32 %evl)
+  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %vb, <vscale x 5 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 5 x i8> %v
 }
 
@@ -236,9 +220,7 @@ define <vscale x 8 x i8> @vsub_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -262,9 +244,7 @@ define <vscale x 8 x i8> @vsub_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -286,9 +266,7 @@ define <vscale x 16 x i8> @vsub_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -312,9 +290,7 @@ define <vscale x 16 x i8> @vsub_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -336,9 +312,7 @@ define <vscale x 32 x i8> @vsub_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -362,9 +336,7 @@ define <vscale x 32 x i8> @vsub_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -386,9 +358,7 @@ define <vscale x 64 x i8> @vsub_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -412,9 +382,7 @@ define <vscale x 64 x i8> @vsub_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -436,9 +404,7 @@ define <vscale x 1 x i16> @vsub_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -462,9 +428,7 @@ define <vscale x 1 x i16> @vsub_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -486,9 +450,7 @@ define <vscale x 2 x i16> @vsub_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -512,9 +474,7 @@ define <vscale x 2 x i16> @vsub_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -536,9 +496,7 @@ define <vscale x 4 x i16> @vsub_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -562,9 +520,7 @@ define <vscale x 4 x i16> @vsub_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -586,9 +542,7 @@ define <vscale x 8 x i16> @vsub_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -612,9 +566,7 @@ define <vscale x 8 x i16> @vsub_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -636,9 +588,7 @@ define <vscale x 16 x i16> @vsub_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -662,9 +612,7 @@ define <vscale x 16 x i16> @vsub_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -686,9 +634,7 @@ define <vscale x 32 x i16> @vsub_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -712,9 +658,7 @@ define <vscale x 32 x i16> @vsub_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -736,9 +680,7 @@ define <vscale x 1 x i32> @vsub_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -762,9 +704,7 @@ define <vscale x 1 x i32> @vsub_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -786,9 +726,7 @@ define <vscale x 2 x i32> @vsub_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -812,9 +750,7 @@ define <vscale x 2 x i32> @vsub_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -836,9 +772,7 @@ define <vscale x 4 x i32> @vsub_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -862,9 +796,7 @@ define <vscale x 4 x i32> @vsub_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -886,9 +818,7 @@ define <vscale x 8 x i32> @vsub_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -912,9 +842,7 @@ define <vscale x 8 x i32> @vsub_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -936,9 +864,7 @@ define <vscale x 16 x i32> @vsub_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -962,9 +888,7 @@ define <vscale x 16 x i32> @vsub_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -986,9 +910,7 @@ define <vscale x 1 x i64> @vsub_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1040,9 +962,7 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -1064,9 +984,7 @@ define <vscale x 2 x i64> @vsub_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1118,9 +1036,7 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -1142,9 +1058,7 @@ define <vscale x 4 x i64> @vsub_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1196,9 +1110,7 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -1220,9 +1132,7 @@ define <vscale x 8 x i64> @vsub_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -1274,8 +1184,6 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
index 66e6883dd1d3e3..5dd01c654eff1d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
@@ -1406,7 +1406,7 @@ define <vscale x 2 x i32> @vwaddu_vv_disjoint_or_add(<vscale x 2 x i8> %x.i8, <v
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %x.i16 = zext <vscale x 2 x i8> %x.i8 to <vscale x 2 x i16>
-  %x.shl = shl <vscale x 2 x i16> %x.i16, shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 8, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer)
+  %x.shl = shl <vscale x 2 x i16> %x.i16, splat (i16 8)
   %x.i32 = zext <vscale x 2 x i16> %x.shl to <vscale x 2 x i32>
   %y.i32 = zext <vscale x 2 x i8> %y.i8 to <vscale x 2 x i32>
   %add = add <vscale x 2 x i32> %x.i32, %y.i32
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll
index f5cf4acd592c0e..02bc8d2731153d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-vp.ll
@@ -17,12 +17,10 @@ define <vscale x 1 x i32> @vwmacc_vv_nxv1i32_unmasked_tu(<vscale x 1 x i16> %a,
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
   <vscale x 1 x i16> %b, <vscale x 1 x i32> %c, i32 zeroext %evl) {
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %abmul = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %aext, <vscale x 1 x i32> %bext, <vscale x 1 x i1> %allones, i32 %evl)
-  %cadd = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %abmul, <vscale x 1 x i32> %c, <vscale x 1 x i1>%allones, i32 %evl)
-  %ret = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %cadd, <vscale x 1 x i32> %c, i32 %evl)
+  %aext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %abmul = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %aext, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %cadd = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %abmul, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %ret = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %cadd, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %ret
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll
index 72ef25ee9c318a..486a5b09b677cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-vp.ll
@@ -18,13 +18,11 @@ define <vscale x 1 x i32> @vwmacc_vv_nxv1i32_unmasked_tu(<vscale x 1 x i16> %a,
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
   <vscale x 1 x i16> %b, <vscale x 1 x i32> %c, i32 zeroext %evl) {
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x i32> @llvm.vp.zext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %abmul = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %aext, <vscale x 1 x i32> %bext, <vscale x 1 x i1> %allones, i32 %evl)
-  %cadd = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %abmul, <vscale x 1 x i32> %c, <vscale x 1 x i1>%allones, i32 %evl)
-  %ret = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %cadd, <vscale x 1 x i32> %c, i32 %evl)
+  %aext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x i32> @llvm.vp.zext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %abmul = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %aext, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %cadd = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %abmul, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %ret = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %cadd, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %ret
 }
 
@@ -36,12 +34,10 @@ define <vscale x 1 x i32> @vwmacc_vv_nxv1i32_commute_unmasked_tu(<vscale x 1 x i
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
   <vscale x 1 x i16> %b, <vscale x 1 x i32> %c, i32 zeroext %evl) {
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x i32> @llvm.vp.zext.nxv1i32.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %abmul = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %aext, <vscale x 1 x i32> %bext, <vscale x 1 x i1> %allones, i32 %evl)
-  %cadd = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %abmul, <vscale x 1 x i32> %c, <vscale x 1 x i1>%allones, i32 %evl)
-  %ret = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %cadd, <vscale x 1 x i32> %c, i32 %evl)
+  %aext = call <vscale x 1 x i32> @llvm.vp.zext.nxv1i32.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x i32> @llvm.vp.sext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %abmul = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %aext, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %cadd = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %abmul, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %ret = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %cadd, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %ret
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll
index 74dcb92b7cd659..125270be4fc851 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-vp.ll
@@ -17,12 +17,10 @@ define <vscale x 1 x i32> @vwmacc_vv_nxv1i32_unmasked_tu(<vscale x 1 x i16> %a,
 ; CHECK-NEXT:    vmv1r.v v8, v10
 ; CHECK-NEXT:    ret
   <vscale x 1 x i16> %b, <vscale x 1 x i32> %c, i32 zeroext %evl) {
-  %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
-  %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %aext = call <vscale x 1 x i32> @llvm.vp.zext.nxv1i32.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i1> %allones, i32 %evl)
-  %bext = call <vscale x 1 x i32> @llvm.vp.zext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
-  %abmul = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %aext, <vscale x 1 x i32> %bext, <vscale x 1 x i1> %allones, i32 %evl)
-  %cadd = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %abmul, <vscale x 1 x i32> %c, <vscale x 1 x i1>%allones, i32 %evl)
-  %ret = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %cadd, <vscale x 1 x i32> %c, i32 %evl)
+  %aext = call <vscale x 1 x i32> @llvm.vp.zext.nxv1i32.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %bext = call <vscale x 1 x i32> @llvm.vp.zext.nxv1i32.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %abmul = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %aext, <vscale x 1 x i32> %bext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %cadd = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %abmul, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
+  %ret = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %cadd, <vscale x 1 x i32> %c, i32 %evl)
   ret <vscale x 1 x i32> %ret
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
index e8c4f1fed6efb1..3f10b10675ca70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
@@ -30,9 +30,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i8> %va, %splat
+  %vc = xor <vscale x 1 x i8> %va, splat (i8 -1)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -42,9 +40,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i8> %va, %splat
+  %vc = xor <vscale x 1 x i8> %va, splat (i8 8)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -55,9 +51,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_2(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i8> %va, %splat
+  %vc = xor <vscale x 1 x i8> %va, splat (i8 16)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -89,9 +83,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i8> %va, %splat
+  %vc = xor <vscale x 2 x i8> %va, splat (i8 -1)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -101,9 +93,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_1(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i8> %va, %splat
+  %vc = xor <vscale x 2 x i8> %va, splat (i8 8)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -114,9 +104,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_2(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i8> %va, %splat
+  %vc = xor <vscale x 2 x i8> %va, splat (i8 16)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -148,9 +136,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i8> %va, %splat
+  %vc = xor <vscale x 4 x i8> %va, splat (i8 -1)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -160,9 +146,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_1(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i8> %va, %splat
+  %vc = xor <vscale x 4 x i8> %va, splat (i8 8)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -173,9 +157,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_2(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i8> %va, %splat
+  %vc = xor <vscale x 4 x i8> %va, splat (i8 16)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -207,9 +189,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i8> %va, %splat
+  %vc = xor <vscale x 8 x i8> %va, splat (i8 -1)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -219,9 +199,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i8> %va, %splat
+  %vc = xor <vscale x 8 x i8> %va, splat (i8 8)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -232,9 +210,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i8> %va, %splat
+  %vc = xor <vscale x 8 x i8> %va, splat (i8 16)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -266,9 +242,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i8> %va, %splat
+  %vc = xor <vscale x 16 x i8> %va, splat (i8 -1)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -278,9 +252,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_1(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i8> %va, %splat
+  %vc = xor <vscale x 16 x i8> %va, splat (i8 8)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -291,9 +263,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_2(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i8> %va, %splat
+  %vc = xor <vscale x 16 x i8> %va, splat (i8 16)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -325,9 +295,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = xor <vscale x 32 x i8> %va, %splat
+  %vc = xor <vscale x 32 x i8> %va, splat (i8 -1)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -337,9 +305,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_1(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = xor <vscale x 32 x i8> %va, %splat
+  %vc = xor <vscale x 32 x i8> %va, splat (i8 8)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -350,9 +316,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_2(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = xor <vscale x 32 x i8> %va, %splat
+  %vc = xor <vscale x 32 x i8> %va, splat (i8 16)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -384,9 +348,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = xor <vscale x 64 x i8> %va, %splat
+  %vc = xor <vscale x 64 x i8> %va, splat (i8 -1)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -396,9 +358,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_1(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 8, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = xor <vscale x 64 x i8> %va, %splat
+  %vc = xor <vscale x 64 x i8> %va, splat (i8 8)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -409,9 +369,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_2(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i8> poison, i8 16, i32 0
-  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %vc = xor <vscale x 64 x i8> %va, %splat
+  %vc = xor <vscale x 64 x i8> %va, splat (i8 16)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -443,9 +401,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i16> %va, %splat
+  %vc = xor <vscale x 1 x i16> %va, splat (i16 -1)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -455,9 +411,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_1(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i16> %va, %splat
+  %vc = xor <vscale x 1 x i16> %va, splat (i16 8)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -468,9 +422,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_2(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i16> %va, %splat
+  %vc = xor <vscale x 1 x i16> %va, splat (i16 16)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -502,9 +454,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i16> %va, %splat
+  %vc = xor <vscale x 2 x i16> %va, splat (i16 -1)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -514,9 +464,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_1(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i16> %va, %splat
+  %vc = xor <vscale x 2 x i16> %va, splat (i16 8)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -527,9 +475,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_2(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i16> %va, %splat
+  %vc = xor <vscale x 2 x i16> %va, splat (i16 16)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -561,9 +507,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i16> %va, %splat
+  %vc = xor <vscale x 4 x i16> %va, splat (i16 -1)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -573,9 +517,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_1(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i16> %va, %splat
+  %vc = xor <vscale x 4 x i16> %va, splat (i16 8)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -586,9 +528,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_2(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i16> %va, %splat
+  %vc = xor <vscale x 4 x i16> %va, splat (i16 16)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -620,9 +560,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i16> %va, %splat
+  %vc = xor <vscale x 8 x i16> %va, splat (i16 -1)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -632,9 +570,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i16> %va, %splat
+  %vc = xor <vscale x 8 x i16> %va, splat (i16 8)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -645,9 +581,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i16> %va, %splat
+  %vc = xor <vscale x 8 x i16> %va, splat (i16 16)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -679,9 +613,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i16> %va, %splat
+  %vc = xor <vscale x 16 x i16> %va, splat (i16 -1)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -691,9 +623,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_1(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i16> %va, %splat
+  %vc = xor <vscale x 16 x i16> %va, splat (i16 8)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -704,9 +634,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_2(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i16> %va, %splat
+  %vc = xor <vscale x 16 x i16> %va, splat (i16 16)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -738,9 +666,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = xor <vscale x 32 x i16> %va, %splat
+  %vc = xor <vscale x 32 x i16> %va, splat (i16 -1)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -750,9 +676,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_1(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 8, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = xor <vscale x 32 x i16> %va, %splat
+  %vc = xor <vscale x 32 x i16> %va, splat (i16 8)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -763,9 +687,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_2(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i16> poison, i16 16, i32 0
-  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %vc = xor <vscale x 32 x i16> %va, %splat
+  %vc = xor <vscale x 32 x i16> %va, splat (i16 16)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -797,9 +719,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i32> %va, %splat
+  %vc = xor <vscale x 1 x i32> %va, splat (i32 -1)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -809,9 +729,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i32> %va, %splat
+  %vc = xor <vscale x 1 x i32> %va, splat (i32 8)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -822,9 +740,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_2(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i32> %va, %splat
+  %vc = xor <vscale x 1 x i32> %va, splat (i32 16)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -856,9 +772,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i32> %va, %splat
+  %vc = xor <vscale x 2 x i32> %va, splat (i32 -1)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -868,9 +782,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i32> %va, %splat
+  %vc = xor <vscale x 2 x i32> %va, splat (i32 8)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -881,9 +793,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_2(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i32> %va, %splat
+  %vc = xor <vscale x 2 x i32> %va, splat (i32 16)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -915,9 +825,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i32> %va, %splat
+  %vc = xor <vscale x 4 x i32> %va, splat (i32 -1)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -927,9 +835,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i32> %va, %splat
+  %vc = xor <vscale x 4 x i32> %va, splat (i32 8)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -940,9 +846,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_2(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i32> %va, %splat
+  %vc = xor <vscale x 4 x i32> %va, splat (i32 16)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -974,9 +878,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i32> %va, %splat
+  %vc = xor <vscale x 8 x i32> %va, splat (i32 -1)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -986,9 +888,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i32> %va, %splat
+  %vc = xor <vscale x 8 x i32> %va, splat (i32 8)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -999,9 +899,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i32> %va, %splat
+  %vc = xor <vscale x 8 x i32> %va, splat (i32 16)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -1033,9 +931,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i32> %va, %splat
+  %vc = xor <vscale x 16 x i32> %va, splat (i32 -1)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -1045,9 +941,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_1(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 8, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i32> %va, %splat
+  %vc = xor <vscale x 16 x i32> %va, splat (i32 8)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -1058,9 +952,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_2(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i32> poison, i32 16, i32 0
-  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %vc = xor <vscale x 16 x i32> %va, %splat
+  %vc = xor <vscale x 16 x i32> %va, splat (i32 16)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -1105,9 +997,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i64> %va, %splat
+  %vc = xor <vscale x 1 x i64> %va, splat (i64 -1)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -1117,9 +1007,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 8, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i64> %va, %splat
+  %vc = xor <vscale x 1 x i64> %va, splat (i64 8)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -1130,9 +1018,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %vc = xor <vscale x 1 x i64> %va, %splat
+  %vc = xor <vscale x 1 x i64> %va, splat (i64 16)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -1177,9 +1063,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i64> %va, %splat
+  %vc = xor <vscale x 2 x i64> %va, splat (i64 -1)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1189,9 +1073,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 8, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i64> %va, %splat
+  %vc = xor <vscale x 2 x i64> %va, splat (i64 8)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1202,9 +1084,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %vc = xor <vscale x 2 x i64> %va, %splat
+  %vc = xor <vscale x 2 x i64> %va, splat (i64 16)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -1249,9 +1129,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i64> %va, %splat
+  %vc = xor <vscale x 4 x i64> %va, splat (i64 -1)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1261,9 +1139,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 8, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i64> %va, %splat
+  %vc = xor <vscale x 4 x i64> %va, splat (i64 8)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1274,9 +1150,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %vc = xor <vscale x 4 x i64> %va, %splat
+  %vc = xor <vscale x 4 x i64> %va, splat (i64 16)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -1321,9 +1195,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i64> %va, %splat
+  %vc = xor <vscale x 8 x i64> %va, splat (i64 -1)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1333,9 +1205,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 8
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 8, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i64> %va, %splat
+  %vc = xor <vscale x 8 x i64> %va, splat (i64 8)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1346,9 +1216,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i64> poison, i64 16, i32 0
-  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %vc = xor <vscale x 8 x i64> %va, %splat
+  %vc = xor <vscale x 8 x i64> %va, splat (i64 16)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -1413,9 +1281,7 @@ define <vscale x 8 x i32> @vxor_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
+  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> zeroinitializer
   %vc = xor <vscale x 8 x i32> %va, %vs
   ret <vscale x 8 x i32> %vc
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
index 96b86ff6179d48..f2235b4fdc94ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
@@ -36,9 +36,7 @@ define <vscale x 1 x i8> @vxor_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -62,9 +60,7 @@ define <vscale x 1 x i8> @vxor_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -74,9 +70,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 7), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -86,11 +80,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 7), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -100,9 +90,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_1(<vscale x 1 x i8> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -112,11 +100,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_unmasked_1(<vscale x 1 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
 
@@ -138,9 +122,7 @@ define <vscale x 2 x i8> @vxor_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -164,9 +146,7 @@ define <vscale x 2 x i8> @vxor_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -176,9 +156,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 7), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -188,11 +166,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 7), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -202,9 +176,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_1(<vscale x 2 x i8> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -214,11 +186,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_unmasked_1(<vscale x 2 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
@@ -240,9 +208,7 @@ define <vscale x 4 x i8> @vxor_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -266,9 +232,7 @@ define <vscale x 4 x i8> @vxor_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -278,9 +242,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 7), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -290,11 +252,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 7), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -304,9 +262,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_1(<vscale x 4 x i8> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -316,11 +272,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_unmasked_1(<vscale x 4 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
 
@@ -342,9 +294,7 @@ define <vscale x 8 x i8> @vxor_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -368,9 +318,7 @@ define <vscale x 8 x i8> @vxor_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -380,9 +328,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 7), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -392,11 +338,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 7), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -406,9 +348,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_1(<vscale x 8 x i8> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -418,11 +358,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_unmasked_1(<vscale x 8 x i8> %va, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
 
@@ -444,9 +380,7 @@ define <vscale x 15 x i8> @vxor_vv_nxv15i8_unmasked(<vscale x 15 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 15 x i1> %head, <vscale x 15 x i1> poison, <vscale x 15 x i32> zeroinitializer
-  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> %m, i32 %evl)
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 15 x i8> %v
 }
 
@@ -470,9 +404,7 @@ define <vscale x 15 x i8> @vxor_vx_nxv15i8_unmasked(<vscale x 15 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 15 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
-  %head = insertelement <vscale x 15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 15 x i1> %head, <vscale x 15 x i1> poison, <vscale x 15 x i32> zeroinitializer
-  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 15 x i8> %v
 }
 
@@ -482,9 +414,7 @@ define <vscale x 15 x i8> @vxor_vi_nxv15i8(<vscale x 15 x i8> %va, <vscale x 15
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 15 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
-  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> splat (i8 7), <vscale x 15 x i1> %m, i32 %evl)
   ret <vscale x 15 x i8> %v
 }
 
@@ -494,11 +424,7 @@ define <vscale x 15 x i8> @vxor_vi_nxv15i8_unmasked(<vscale x 15 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 15 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
-  %head = insertelement <vscale x 15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 15 x i1> %head, <vscale x 15 x i1> poison, <vscale x 15 x i32> zeroinitializer
-  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> splat (i8 7), <vscale x 15 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 15 x i8> %v
 }
 
@@ -508,9 +434,7 @@ define <vscale x 15 x i8> @vxor_vi_nxv15i8_1(<vscale x 15 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 15 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
-  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> splat (i8 -1), <vscale x 15 x i1> %m, i32 %evl)
   ret <vscale x 15 x i8> %v
 }
 
@@ -520,11 +444,7 @@ define <vscale x 15 x i8> @vxor_vi_nxv15i8_unmasked_1(<vscale x 15 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 15 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> poison, <vscale x 15 x i32> zeroinitializer
-  %head = insertelement <vscale x 15 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 15 x i1> %head, <vscale x 15 x i1> poison, <vscale x 15 x i32> zeroinitializer
-  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> splat (i8 -1), <vscale x 15 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 15 x i8> %v
 }
 
@@ -546,9 +466,7 @@ define <vscale x 16 x i8> @vxor_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -572,9 +490,7 @@ define <vscale x 16 x i8> @vxor_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -584,9 +500,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 7), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -596,11 +510,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 7), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -610,9 +520,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_1(<vscale x 16 x i8> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -622,11 +530,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_unmasked_1(<vscale x 16 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
 
@@ -648,9 +552,7 @@ define <vscale x 32 x i8> @vxor_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -674,9 +576,7 @@ define <vscale x 32 x i8> @vxor_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -686,9 +586,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 7), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -698,11 +596,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 7), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -712,9 +606,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_1(<vscale x 32 x i8> %va, <vscale x 3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -724,11 +616,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_unmasked_1(<vscale x 32 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
 
@@ -750,9 +638,7 @@ define <vscale x 64 x i8> @vxor_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -776,9 +662,7 @@ define <vscale x 64 x i8> @vxor_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -788,9 +672,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 7), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -800,11 +682,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 7, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 7), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -814,9 +692,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_1(<vscale x 64 x i8> %va, <vscale x 6
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -826,11 +702,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_unmasked_1(<vscale x 64 x i8> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
-  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
-  %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
-  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+  %v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
 
@@ -852,9 +724,7 @@ define <vscale x 1 x i16> @vxor_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -890,9 +760,7 @@ define <vscale x 1 x i16> @vxor_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -902,9 +770,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 7), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -914,11 +780,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 7), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -928,9 +790,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_1(<vscale x 1 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -940,11 +800,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_unmasked_1(<vscale x 1 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
 
@@ -966,9 +822,7 @@ define <vscale x 2 x i16> @vxor_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -992,9 +846,7 @@ define <vscale x 2 x i16> @vxor_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -1004,9 +856,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 7), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -1016,11 +866,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 7), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -1030,9 +876,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_1(<vscale x 2 x i16> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -1042,11 +886,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_unmasked_1(<vscale x 2 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
@@ -1068,9 +908,7 @@ define <vscale x 4 x i16> @vxor_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -1094,9 +932,7 @@ define <vscale x 4 x i16> @vxor_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -1106,9 +942,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 7), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -1118,11 +952,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 7), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -1132,9 +962,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_1(<vscale x 4 x i16> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -1144,11 +972,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_unmasked_1(<vscale x 4 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
 
@@ -1170,9 +994,7 @@ define <vscale x 8 x i16> @vxor_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1196,9 +1018,7 @@ define <vscale x 8 x i16> @vxor_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1208,9 +1028,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 7), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1220,11 +1038,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 7), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1234,9 +1048,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_1(<vscale x 8 x i16> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1246,11 +1058,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_unmasked_1(<vscale x 8 x i16> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
 
@@ -1272,9 +1080,7 @@ define <vscale x 16 x i16> @vxor_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1298,9 +1104,7 @@ define <vscale x 16 x i16> @vxor_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1310,9 +1114,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 7), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1322,11 +1124,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 7), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1336,9 +1134,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_1(<vscale x 16 x i16> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1348,11 +1144,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_unmasked_1(<vscale x 16 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
 
@@ -1374,9 +1166,7 @@ define <vscale x 32 x i16> @vxor_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1400,9 +1190,7 @@ define <vscale x 32 x i16> @vxor_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1412,9 +1200,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 7), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1424,11 +1210,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 7, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 7), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1438,9 +1220,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_1(<vscale x 32 x i16> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1450,11 +1230,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_unmasked_1(<vscale x 32 x i16> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
-  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
-  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+  %v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
 
@@ -1476,9 +1252,7 @@ define <vscale x 1 x i32> @vxor_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1502,9 +1276,7 @@ define <vscale x 1 x i32> @vxor_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1514,9 +1286,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 7), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1526,11 +1296,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 7), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1540,9 +1306,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_1(<vscale x 1 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1552,11 +1316,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_unmasked_1(<vscale x 1 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
 
@@ -1578,9 +1338,7 @@ define <vscale x 2 x i32> @vxor_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1604,9 +1362,7 @@ define <vscale x 2 x i32> @vxor_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1616,9 +1372,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 7), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1628,11 +1382,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 7), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1642,9 +1392,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_1(<vscale x 2 x i32> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1654,11 +1402,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_unmasked_1(<vscale x 2 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -1680,9 +1424,7 @@ define <vscale x 4 x i32> @vxor_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1706,9 +1448,7 @@ define <vscale x 4 x i32> @vxor_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1718,9 +1458,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 7), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1730,11 +1468,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 7), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1744,9 +1478,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_1(<vscale x 4 x i32> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1756,11 +1488,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_unmasked_1(<vscale x 4 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
 
@@ -1782,9 +1510,7 @@ define <vscale x 8 x i32> @vxor_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1808,9 +1534,7 @@ define <vscale x 8 x i32> @vxor_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1820,9 +1544,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1832,11 +1554,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1846,9 +1564,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_1(<vscale x 8 x i32> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1858,11 +1574,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_unmasked_1(<vscale x 8 x i32> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
 
@@ -1884,9 +1596,7 @@ define <vscale x 16 x i32> @vxor_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1910,9 +1620,7 @@ define <vscale x 16 x i32> @vxor_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1922,9 +1630,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 7), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1934,11 +1640,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 7, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 7), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1948,9 +1650,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_1(<vscale x 16 x i32> %va, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1960,11 +1660,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_unmasked_1(<vscale x 16 x i32> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
-  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
-  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+  %v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
 
@@ -1986,9 +1682,7 @@ define <vscale x 1 x i64> @vxor_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -2040,9 +1734,7 @@ define <vscale x 1 x i64> @vxor_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -2052,9 +1744,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 7), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -2064,11 +1754,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 7), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -2078,9 +1764,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_1(<vscale x 1 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -2090,11 +1774,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_unmasked_1(<vscale x 1 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
-  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+  %v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
 
@@ -2116,9 +1796,7 @@ define <vscale x 2 x i64> @vxor_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -2170,9 +1848,7 @@ define <vscale x 2 x i64> @vxor_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -2182,9 +1858,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 7), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -2194,11 +1868,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 7), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -2208,9 +1878,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_1(<vscale x 2 x i64> %va, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -2220,11 +1888,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_unmasked_1(<vscale x 2 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+  %v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
@@ -2246,9 +1910,7 @@ define <vscale x 4 x i64> @vxor_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2300,9 +1962,7 @@ define <vscale x 4 x i64> @vxor_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2312,9 +1972,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 7), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2324,11 +1982,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 7), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2338,9 +1992,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_1(<vscale x 4 x i64> %va, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2350,11 +2002,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_unmasked_1(<vscale x 4 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+  %v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
 
@@ -2376,9 +2024,7 @@ define <vscale x 8 x i64> @vxor_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2430,9 +2076,7 @@ define <vscale x 8 x i64> @vxor_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2442,9 +2086,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 7), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2454,11 +2096,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vxor.vi v8, v8, 7
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 7), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2468,9 +2106,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_1(<vscale x 8 x i64> %va, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
 
@@ -2480,10 +2116,6 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_unmasked_1(<vscale x 8 x i64> %va, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vnot.v v8, v8
 ; CHECK-NEXT:    ret
-  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
-  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
-  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+  %v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
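
For reference, here is a minimal standalone IR sketch of the two equivalent splat spellings that the hunks in these patches switch between. The function name @vxor_vi_example is hypothetical and only mirrors the vxor tests above; both calls compute the same result, they just spell the splat operand differently.

    declare <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

    define <vscale x 4 x i64> @vxor_vi_example(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl) {
      ; Expanded form: insert the scalar into lane 0, then broadcast it with a
      ; zeroinitializer shuffle mask.
      %elt.head = insertelement <vscale x 4 x i64> poison, i64 7, i32 0
      %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
      %v1 = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
      ; Shorthand form: the same constant splat written directly as a splat
      ; constant expression.
      %v2 = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %v1, <vscale x 4 x i64> splat (i64 7), <vscale x 4 x i1> %m, i32 %evl)
      ret <vscale x 4 x i64> %v2
    }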

>From ba3c22ec5e2eb43dc5d03f36c922142c94afadc1 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 9 Apr 2024 07:40:23 +0800
Subject: [PATCH 2/2] Split out tests with codegen diffs

---
 .../RISCV/rvv/fixed-vectors-masked-gather.ll  | 2299 +++++++++++++----
 .../RISCV/rvv/fixed-vectors-masked-scatter.ll |  494 +++-
 .../RISCV/rvv/fixed-vectors-vadd-vp.ll        |  356 ++-
 .../RISCV/rvv/fixed-vectors-vmax-vp.ll        |  152 +-
 .../RISCV/rvv/fixed-vectors-vmaxu-vp.ll       |  152 +-
 .../RISCV/rvv/fixed-vectors-vmin-vp.ll        |  152 +-
 .../RISCV/rvv/fixed-vectors-vminu-vp.ll       |  152 +-
 .../RISCV/rvv/fixed-vectors-vsadd-vp.ll       |  356 ++-
 .../RISCV/rvv/fixed-vectors-vsaddu-vp.ll      |  356 ++-
 .../RISCV/rvv/fixed-vectors-vssub-vp.ll       |  356 ++-
 .../RISCV/rvv/fixed-vectors-vssubu-vp.ll      |  356 ++-
 llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll    |   64 +-
 llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll    |   64 +-
 llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll   |   72 +-
 llvm/test/CodeGen/RISCV/rvv/rint-vp.ll        |   72 +-
 llvm/test/CodeGen/RISCV/rvv/round-vp.ll       |   72 +-
 llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll   |   72 +-
 llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll |   72 +-
 .../CodeGen/RISCV/rvv/strided-load-store.ll   |   12 +-
 llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll   |   64 +-
 llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll       |   72 +-
 llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll       |  132 +-
 llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll       |  128 +-
 llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll       |   64 +-
 llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll       |   64 +-
 llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll       |  128 +-
 llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll       |   72 +-
 llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll      |   72 +-
 llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll       |  128 +-
 29 files changed, 5050 insertions(+), 1555 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 9fbc22221f99bd..a09ab3ee0252a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -518,20 +518,55 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4i8:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a1, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB9_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB9_6
+; RV64ZVE32F-NEXT:  .LBB9_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB9_7
+; RV64ZVE32F-NEXT:  .LBB9_3: # %else5
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    bnez a1, .LBB9_8
+; RV64ZVE32F-NEXT:  .LBB9_4: # %else8
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB9_5: # %cond.load
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB9_2
+; RV64ZVE32F-NEXT:  .LBB9_6: # %cond.load1
+; RV64ZVE32F-NEXT:    ld a2, 8(a0)
+; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB9_3
+; RV64ZVE32F-NEXT:  .LBB9_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a3, 24(a0)
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    lbu a1, 0(a1)
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
-; RV64ZVE32F-NEXT:    lbu a3, 0(a3)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e8, mf4, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    beqz a1, .LBB9_4
+; RV64ZVE32F-NEXT:  .LBB9_8: # %cond.load7
+; RV64ZVE32F-NEXT:    ld a0, 24(a0)
+; RV64ZVE32F-NEXT:    lbu a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vlse8.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
-  %v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> splat (i1 1), <4 x i8> %passthru)
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> %mtrue, <4 x i8> %passthru)
   ret <4 x i8> %v
 }
 
@@ -1207,20 +1242,55 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4i16:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a1, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB20_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB20_6
+; RV64ZVE32F-NEXT:  .LBB20_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB20_7
+; RV64ZVE32F-NEXT:  .LBB20_3: # %else5
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    bnez a1, .LBB20_8
+; RV64ZVE32F-NEXT:  .LBB20_4: # %else8
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB20_5: # %cond.load
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB20_2
+; RV64ZVE32F-NEXT:  .LBB20_6: # %cond.load1
+; RV64ZVE32F-NEXT:    ld a2, 8(a0)
+; RV64ZVE32F-NEXT:    lh a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB20_3
+; RV64ZVE32F-NEXT:  .LBB20_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a3, 24(a0)
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    lh a1, 0(a1)
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
-; RV64ZVE32F-NEXT:    lh a3, 0(a3)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    beqz a1, .LBB20_4
+; RV64ZVE32F-NEXT:  .LBB20_8: # %cond.load7
+; RV64ZVE32F-NEXT:    ld a0, 24(a0)
+; RV64ZVE32F-NEXT:    lh a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
-  %v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1), <4 x i16> %passthru)
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> %mtrue, <4 x i16> %passthru)
   ret <4 x i16> %v
 }
 
@@ -2256,20 +2326,55 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4i32:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a1, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB32_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB32_6
+; RV64ZVE32F-NEXT:  .LBB32_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB32_7
+; RV64ZVE32F-NEXT:  .LBB32_3: # %else5
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    bnez a1, .LBB32_8
+; RV64ZVE32F-NEXT:  .LBB32_4: # %else8
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB32_5: # %cond.load
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    lw a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v8, a2
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB32_2
+; RV64ZVE32F-NEXT:  .LBB32_6: # %cond.load1
+; RV64ZVE32F-NEXT:    ld a2, 8(a0)
+; RV64ZVE32F-NEXT:    lw a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB32_3
+; RV64ZVE32F-NEXT:  .LBB32_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a3, 24(a0)
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    lw a1, 0(a1)
 ; RV64ZVE32F-NEXT:    lw a2, 0(a2)
-; RV64ZVE32F-NEXT:    lw a3, 0(a3)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    beqz a1, .LBB32_4
+; RV64ZVE32F-NEXT:  .LBB32_8: # %cond.load7
+; RV64ZVE32F-NEXT:    ld a0, 24(a0)
+; RV64ZVE32F-NEXT:    lw a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vlse32.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1), <4 x i32> %passthru)
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %mtrue, <4 x i32> %passthru)
   ret <4 x i32> %v
 }
 
@@ -3734,48 +3839,117 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x ptr> %ptrs, <4 x i64> %passthru) {
 ;
 ; RV32ZVE32F-LABEL: mgather_truemask_v4i64:
 ; RV32ZVE32F:       # %bb.0:
+; RV32ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV32ZVE32F-NEXT:    vmset.m v9
+; RV32ZVE32F-NEXT:    vmv.x.s a6, v9
+; RV32ZVE32F-NEXT:    bnez zero, .LBB45_5
+; RV32ZVE32F-NEXT:  # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a3, v8
+; RV32ZVE32F-NEXT:    lw a2, 4(a3)
+; RV32ZVE32F-NEXT:    lw a3, 0(a3)
+; RV32ZVE32F-NEXT:    andi a4, a6, 2
+; RV32ZVE32F-NEXT:    bnez a4, .LBB45_6
+; RV32ZVE32F-NEXT:  .LBB45_2:
+; RV32ZVE32F-NEXT:    lw a4, 12(a1)
+; RV32ZVE32F-NEXT:    lw a5, 8(a1)
+; RV32ZVE32F-NEXT:    andi a7, a6, 4
+; RV32ZVE32F-NEXT:    bnez a7, .LBB45_7
+; RV32ZVE32F-NEXT:  .LBB45_3:
+; RV32ZVE32F-NEXT:    lw a7, 20(a1)
+; RV32ZVE32F-NEXT:    lw t0, 16(a1)
+; RV32ZVE32F-NEXT:    andi a6, a6, 8
+; RV32ZVE32F-NEXT:    bnez a6, .LBB45_8
+; RV32ZVE32F-NEXT:  .LBB45_4:
+; RV32ZVE32F-NEXT:    lw a6, 28(a1)
+; RV32ZVE32F-NEXT:    lw a1, 24(a1)
+; RV32ZVE32F-NEXT:    j .LBB45_9
+; RV32ZVE32F-NEXT:  .LBB45_5:
+; RV32ZVE32F-NEXT:    lw a2, 4(a1)
+; RV32ZVE32F-NEXT:    lw a3, 0(a1)
+; RV32ZVE32F-NEXT:    andi a4, a6, 2
+; RV32ZVE32F-NEXT:    beqz a4, .LBB45_2
+; RV32ZVE32F-NEXT:  .LBB45_6: # %cond.load1
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV32ZVE32F-NEXT:    lw a2, 0(a1)
-; RV32ZVE32F-NEXT:    lw a1, 4(a1)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV32ZVE32F-NEXT:    vmv.x.s a3, v9
-; RV32ZVE32F-NEXT:    lw a4, 0(a3)
-; RV32ZVE32F-NEXT:    lw a3, 4(a3)
-; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV32ZVE32F-NEXT:    vmv.x.s a5, v9
-; RV32ZVE32F-NEXT:    lw a6, 0(a5)
-; RV32ZVE32F-NEXT:    lw a5, 4(a5)
+; RV32ZVE32F-NEXT:    lw a4, 4(a5)
+; RV32ZVE32F-NEXT:    lw a5, 0(a5)
+; RV32ZVE32F-NEXT:    andi a7, a6, 4
+; RV32ZVE32F-NEXT:    beqz a7, .LBB45_3
+; RV32ZVE32F-NEXT:  .LBB45_7: # %cond.load4
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
+; RV32ZVE32F-NEXT:    vmv.x.s t0, v9
+; RV32ZVE32F-NEXT:    lw a7, 4(t0)
+; RV32ZVE32F-NEXT:    lw t0, 0(t0)
+; RV32ZVE32F-NEXT:    andi a6, a6, 8
+; RV32ZVE32F-NEXT:    beqz a6, .LBB45_4
+; RV32ZVE32F-NEXT:  .LBB45_8: # %cond.load7
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
-; RV32ZVE32F-NEXT:    vmv.x.s a7, v8
-; RV32ZVE32F-NEXT:    lw t0, 4(a7)
-; RV32ZVE32F-NEXT:    lw a7, 0(a7)
-; RV32ZVE32F-NEXT:    sw a1, 4(a0)
-; RV32ZVE32F-NEXT:    sw a2, 0(a0)
-; RV32ZVE32F-NEXT:    sw t0, 28(a0)
-; RV32ZVE32F-NEXT:    sw a7, 24(a0)
-; RV32ZVE32F-NEXT:    sw a5, 20(a0)
-; RV32ZVE32F-NEXT:    sw a6, 16(a0)
-; RV32ZVE32F-NEXT:    sw a3, 12(a0)
-; RV32ZVE32F-NEXT:    sw a4, 8(a0)
+; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV32ZVE32F-NEXT:    lw a6, 4(a1)
+; RV32ZVE32F-NEXT:    lw a1, 0(a1)
+; RV32ZVE32F-NEXT:  .LBB45_9: # %else8
+; RV32ZVE32F-NEXT:    sw a3, 0(a0)
+; RV32ZVE32F-NEXT:    sw a2, 4(a0)
+; RV32ZVE32F-NEXT:    sw a5, 8(a0)
+; RV32ZVE32F-NEXT:    sw a4, 12(a0)
+; RV32ZVE32F-NEXT:    sw t0, 16(a0)
+; RV32ZVE32F-NEXT:    sw a7, 20(a0)
+; RV32ZVE32F-NEXT:    sw a1, 24(a0)
+; RV32ZVE32F-NEXT:    sw a6, 28(a0)
 ; RV32ZVE32F-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4i64:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a2, 24(a1)
-; RV64ZVE32F-NEXT:    ld a3, 16(a1)
-; RV64ZVE32F-NEXT:    ld a4, 8(a1)
-; RV64ZVE32F-NEXT:    ld a1, 0(a1)
-; RV64ZVE32F-NEXT:    ld a2, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a5, v8
+; RV64ZVE32F-NEXT:    bnez zero, .LBB45_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
+; RV64ZVE32F-NEXT:    ld a3, 0(a1)
 ; RV64ZVE32F-NEXT:    ld a3, 0(a3)
+; RV64ZVE32F-NEXT:    andi a4, a5, 2
+; RV64ZVE32F-NEXT:    bnez a4, .LBB45_6
+; RV64ZVE32F-NEXT:  .LBB45_2:
+; RV64ZVE32F-NEXT:    ld a4, 8(a2)
+; RV64ZVE32F-NEXT:    andi a6, a5, 4
+; RV64ZVE32F-NEXT:    bnez a6, .LBB45_7
+; RV64ZVE32F-NEXT:  .LBB45_3:
+; RV64ZVE32F-NEXT:    ld a6, 16(a2)
+; RV64ZVE32F-NEXT:    andi a5, a5, 8
+; RV64ZVE32F-NEXT:    bnez a5, .LBB45_8
+; RV64ZVE32F-NEXT:  .LBB45_4:
+; RV64ZVE32F-NEXT:    ld a1, 24(a2)
+; RV64ZVE32F-NEXT:    j .LBB45_9
+; RV64ZVE32F-NEXT:  .LBB45_5:
+; RV64ZVE32F-NEXT:    ld a3, 0(a2)
+; RV64ZVE32F-NEXT:    andi a4, a5, 2
+; RV64ZVE32F-NEXT:    beqz a4, .LBB45_2
+; RV64ZVE32F-NEXT:  .LBB45_6: # %cond.load1
+; RV64ZVE32F-NEXT:    ld a4, 8(a1)
 ; RV64ZVE32F-NEXT:    ld a4, 0(a4)
+; RV64ZVE32F-NEXT:    andi a6, a5, 4
+; RV64ZVE32F-NEXT:    beqz a6, .LBB45_3
+; RV64ZVE32F-NEXT:  .LBB45_7: # %cond.load4
+; RV64ZVE32F-NEXT:    ld a6, 16(a1)
+; RV64ZVE32F-NEXT:    ld a6, 0(a6)
+; RV64ZVE32F-NEXT:    andi a5, a5, 8
+; RV64ZVE32F-NEXT:    beqz a5, .LBB45_4
+; RV64ZVE32F-NEXT:  .LBB45_8: # %cond.load7
+; RV64ZVE32F-NEXT:    ld a1, 24(a1)
 ; RV64ZVE32F-NEXT:    ld a1, 0(a1)
-; RV64ZVE32F-NEXT:    sd a2, 24(a0)
-; RV64ZVE32F-NEXT:    sd a3, 16(a0)
+; RV64ZVE32F-NEXT:  .LBB45_9: # %else8
+; RV64ZVE32F-NEXT:    sd a3, 0(a0)
 ; RV64ZVE32F-NEXT:    sd a4, 8(a0)
-; RV64ZVE32F-NEXT:    sd a1, 0(a0)
+; RV64ZVE32F-NEXT:    sd a6, 16(a0)
+; RV64ZVE32F-NEXT:    sd a1, 24(a0)
 ; RV64ZVE32F-NEXT:    ret
-  %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 1), <4 x i64> %passthru)
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> %mtrue, <4 x i64> %passthru)
   ret <4 x i64> %v
 }
 
@@ -7016,20 +7190,55 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru)
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4f16:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a1, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB61_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB61_6
+; RV64ZVE32F-NEXT:  .LBB61_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB61_7
+; RV64ZVE32F-NEXT:  .LBB61_3: # %else5
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    bnez a1, .LBB61_8
+; RV64ZVE32F-NEXT:  .LBB61_4: # %else8
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB61_5: # %cond.load
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB61_2
+; RV64ZVE32F-NEXT:  .LBB61_6: # %cond.load1
+; RV64ZVE32F-NEXT:    ld a2, 8(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB61_3
+; RV64ZVE32F-NEXT:  .LBB61_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a3, 24(a0)
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    flh fa5, 0(a1)
-; RV64ZVE32F-NEXT:    flh fa4, 0(a2)
-; RV64ZVE32F-NEXT:    flh fa3, 0(a3)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, mf2, tu, ma
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    beqz a1, .LBB61_4
+; RV64ZVE32F-NEXT:  .LBB61_8: # %cond.load7
+; RV64ZVE32F-NEXT:    ld a0, 24(a0)
+; RV64ZVE32F-NEXT:    flh fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa5
-; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa4
-; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa3
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
-  %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1), <4 x half> %passthru)
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> %mtrue, <4 x half> %passthru)
   ret <4 x half> %v
 }
 
@@ -7939,20 +8148,55 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4f32:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a1, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB71_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB71_6
+; RV64ZVE32F-NEXT:  .LBB71_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB71_7
+; RV64ZVE32F-NEXT:  .LBB71_3: # %else5
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    bnez a1, .LBB71_8
+; RV64ZVE32F-NEXT:  .LBB71_4: # %else8
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB71_5: # %cond.load
+; RV64ZVE32F-NEXT:    ld a2, 0(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
+; RV64ZVE32F-NEXT:    vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB71_2
+; RV64ZVE32F-NEXT:  .LBB71_6: # %cond.load1
+; RV64ZVE32F-NEXT:    ld a2, 8(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB71_3
+; RV64ZVE32F-NEXT:  .LBB71_7: # %cond.load4
 ; RV64ZVE32F-NEXT:    ld a2, 16(a0)
-; RV64ZVE32F-NEXT:    ld a3, 24(a0)
-; RV64ZVE32F-NEXT:    ld a0, 0(a0)
-; RV64ZVE32F-NEXT:    flw fa5, 0(a1)
-; RV64ZVE32F-NEXT:    flw fa4, 0(a2)
-; RV64ZVE32F-NEXT:    flw fa3, 0(a3)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    beqz a1, .LBB71_4
+; RV64ZVE32F-NEXT:  .LBB71_8: # %cond.load7
+; RV64ZVE32F-NEXT:    ld a0, 24(a0)
+; RV64ZVE32F-NEXT:    flw fa5, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vlse32.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa5
-; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa4
-; RV64ZVE32F-NEXT:    vfslide1down.vf v8, v8, fa3
+; RV64ZVE32F-NEXT:    vfmv.s.f v9, fa5
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
-  %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1), <4 x float> %passthru)
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %mtrue, <4 x float> %passthru)
   ret <4 x float> %v
 }
 
@@ -9383,43 +9627,98 @@ define <4 x double> @mgather_truemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passt
 ;
 ; RV32ZVE32F-LABEL: mgather_truemask_v4f64:
 ; RV32ZVE32F:       # %bb.0:
-; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
-; RV32ZVE32F-NEXT:    fld fa5, 0(a1)
-; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
+; RV32ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV32ZVE32F-NEXT:    vmset.m v9
 ; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV32ZVE32F-NEXT:    fld fa4, 0(a1)
+; RV32ZVE32F-NEXT:    beqz zero, .LBB84_6
+; RV32ZVE32F-NEXT:  # %bb.1: # %else
+; RV32ZVE32F-NEXT:    andi a2, a1, 2
+; RV32ZVE32F-NEXT:    bnez a2, .LBB84_7
+; RV32ZVE32F-NEXT:  .LBB84_2: # %else2
+; RV32ZVE32F-NEXT:    andi a2, a1, 4
+; RV32ZVE32F-NEXT:    bnez a2, .LBB84_8
+; RV32ZVE32F-NEXT:  .LBB84_3: # %else5
+; RV32ZVE32F-NEXT:    andi a1, a1, 8
+; RV32ZVE32F-NEXT:    beqz a1, .LBB84_5
+; RV32ZVE32F-NEXT:  .LBB84_4: # %cond.load7
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV32ZVE32F-NEXT:    fld fa3, 0(a1)
-; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
-; RV32ZVE32F-NEXT:    fld fa2, 0(a1)
-; RV32ZVE32F-NEXT:    fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT:  .LBB84_5: # %else8
+; RV32ZVE32F-NEXT:    fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT:    fsd fa1, 8(a0)
+; RV32ZVE32F-NEXT:    fsd fa2, 16(a0)
 ; RV32ZVE32F-NEXT:    fsd fa3, 24(a0)
-; RV32ZVE32F-NEXT:    fsd fa4, 16(a0)
-; RV32ZVE32F-NEXT:    fsd fa2, 8(a0)
 ; RV32ZVE32F-NEXT:    ret
+; RV32ZVE32F-NEXT:  .LBB84_6: # %cond.load
+; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a2, v8
+; RV32ZVE32F-NEXT:    fld fa0, 0(a2)
+; RV32ZVE32F-NEXT:    andi a2, a1, 2
+; RV32ZVE32F-NEXT:    beqz a2, .LBB84_2
+; RV32ZVE32F-NEXT:  .LBB84_7: # %cond.load1
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
+; RV32ZVE32F-NEXT:    vmv.x.s a2, v9
+; RV32ZVE32F-NEXT:    fld fa1, 0(a2)
+; RV32ZVE32F-NEXT:    andi a2, a1, 4
+; RV32ZVE32F-NEXT:    beqz a2, .LBB84_3
+; RV32ZVE32F-NEXT:  .LBB84_8: # %cond.load4
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
+; RV32ZVE32F-NEXT:    vmv.x.s a2, v9
+; RV32ZVE32F-NEXT:    fld fa2, 0(a2)
+; RV32ZVE32F-NEXT:    andi a1, a1, 8
+; RV32ZVE32F-NEXT:    bnez a1, .LBB84_4
+; RV32ZVE32F-NEXT:    j .LBB84_5
 ;
 ; RV64ZVE32F-LABEL: mgather_truemask_v4f64:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a2, 24(a1)
-; RV64ZVE32F-NEXT:    ld a3, 16(a1)
-; RV64ZVE32F-NEXT:    ld a4, 8(a1)
-; RV64ZVE32F-NEXT:    ld a1, 0(a1)
-; RV64ZVE32F-NEXT:    fld fa5, 0(a2)
-; RV64ZVE32F-NEXT:    fld fa4, 0(a3)
-; RV64ZVE32F-NEXT:    fld fa3, 0(a4)
-; RV64ZVE32F-NEXT:    fld fa2, 0(a1)
-; RV64ZVE32F-NEXT:    fsd fa5, 24(a0)
-; RV64ZVE32F-NEXT:    fsd fa4, 16(a0)
-; RV64ZVE32F-NEXT:    fsd fa3, 8(a0)
-; RV64ZVE32F-NEXT:    fsd fa2, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB84_6
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a3, a2, 2
+; RV64ZVE32F-NEXT:    bnez a3, .LBB84_7
+; RV64ZVE32F-NEXT:  .LBB84_2: # %else2
+; RV64ZVE32F-NEXT:    andi a3, a2, 4
+; RV64ZVE32F-NEXT:    bnez a3, .LBB84_8
+; RV64ZVE32F-NEXT:  .LBB84_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a2, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB84_5
+; RV64ZVE32F-NEXT:  .LBB84_4: # %cond.load7
+; RV64ZVE32F-NEXT:    ld a1, 24(a1)
+; RV64ZVE32F-NEXT:    fld fa3, 0(a1)
+; RV64ZVE32F-NEXT:  .LBB84_5: # %else8
+; RV64ZVE32F-NEXT:    fsd fa0, 0(a0)
+; RV64ZVE32F-NEXT:    fsd fa1, 8(a0)
+; RV64ZVE32F-NEXT:    fsd fa2, 16(a0)
+; RV64ZVE32F-NEXT:    fsd fa3, 24(a0)
 ; RV64ZVE32F-NEXT:    ret
-  %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 1), <4 x double> %passthru)
-  ret <4 x double> %v
-}
-
+; RV64ZVE32F-NEXT:  .LBB84_6: # %cond.load
+; RV64ZVE32F-NEXT:    ld a3, 0(a1)
+; RV64ZVE32F-NEXT:    fld fa0, 0(a3)
+; RV64ZVE32F-NEXT:    andi a3, a2, 2
+; RV64ZVE32F-NEXT:    beqz a3, .LBB84_2
+; RV64ZVE32F-NEXT:  .LBB84_7: # %cond.load1
+; RV64ZVE32F-NEXT:    ld a3, 8(a1)
+; RV64ZVE32F-NEXT:    fld fa1, 0(a3)
+; RV64ZVE32F-NEXT:    andi a3, a2, 4
+; RV64ZVE32F-NEXT:    beqz a3, .LBB84_3
+; RV64ZVE32F-NEXT:  .LBB84_8: # %cond.load4
+; RV64ZVE32F-NEXT:    ld a3, 16(a1)
+; RV64ZVE32F-NEXT:    fld fa2, 0(a3)
+; RV64ZVE32F-NEXT:    andi a2, a2, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB84_4
+; RV64ZVE32F-NEXT:    j .LBB84_5
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> %mtrue, <4 x double> %passthru)
+  ret <4 x double> %v
+}
+
 define <4 x double> @mgather_falsemask_v4f64(<4 x ptr> %ptrs, <4 x double> %passthru) {
 ; RV32V-LABEL: mgather_falsemask_v4f64:
 ; RV32V:       # %bb.0:
@@ -12551,8 +12850,10 @@ define <4 x i32> @mgather_broadcast_load_unmasked(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), zero
 ; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12563,9 +12864,11 @@ define <4 x i32> @mgather_broadcast_load_unmasked2(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), zero
 ; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrhead = insertelement <4 x ptr> poison, ptr %base, i32 0
   %ptrs = shufflevector <4 x ptr> %ptrhead, <4 x ptr> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12575,6 +12878,8 @@ define <4 x i32> @mgather_broadcast_load_masked(ptr %base, <4 x i1> %m) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vlse32.v v8, (a0), zero, v0.t
 ; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> zeroinitializer
   %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %m, <4 x i32> poison)
   ret <4 x i32> %v
@@ -12586,8 +12891,10 @@ define <4 x i32> @mgather_unit_stride_load(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32>  <i32 0, i32 1, i32 2, i32 3>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12598,8 +12905,10 @@ define <4 x i32> @mgather_unit_stride_load_with_offset(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i32>  <i32 4, i32 5, i32 6, i32 7>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12609,8 +12918,10 @@ define <4 x i32> @mgather_unit_stride_load_narrow_idx(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i8>  <i8 0, i8 1, i8 2, i8 3>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12620,8 +12931,10 @@ define <4 x i32> @mgather_unit_stride_load_wide_idx(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i128>  <i128 0, i128 1, i128 2, i128 3>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12652,15 +12965,51 @@ define <4 x i32> @mgather_narrow_edge_case(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_narrow_edge_case:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    addi a1, a0, -512
-; RV64ZVE32F-NEXT:    lw a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB106_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB106_6
+; RV64ZVE32F-NEXT:  .LBB106_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB106_7
+; RV64ZVE32F-NEXT:  .LBB106_3: # %else5
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    bnez a1, .LBB106_8
+; RV64ZVE32F-NEXT:  .LBB106_4: # %else8
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB106_5: # %cond.load
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vlse32.v v8, (a1), zero
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 5
-; RV64ZVE32F-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64ZVE32F-NEXT:    vlse32.v v8, (a0), zero
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB106_2
+; RV64ZVE32F-NEXT:  .LBB106_6: # %cond.load1
+; RV64ZVE32F-NEXT:    lw a2, -512(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB106_3
+; RV64ZVE32F-NEXT:  .LBB106_7: # %cond.load4
+; RV64ZVE32F-NEXT:    lw a2, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    beqz a1, .LBB106_4
+; RV64ZVE32F-NEXT:  .LBB106_8: # %cond.load7
+; RV64ZVE32F-NEXT:    lw a0, -512(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i32, ptr %base, <4 x i8>  <i8 0, i8 128, i8 0, i8 128>
-  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
   ret <4 x i32> %v
 }
 
@@ -12668,198 +13017,404 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
 ; RV32-LABEL: mgather_strided_unaligned:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vid.v v8
-; RV32-NEXT:    vsll.vi v8, v8, 2
-; RV32-NEXT:    vadd.vx v8, v8, a0
+; RV32-NEXT:    vmset.m v8
+; RV32-NEXT:    vid.v v10
+; RV32-NEXT:    vsll.vi v10, v10, 2
+; RV32-NEXT:    vadd.vx v10, v10, a0
+; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    lbu a1, 0(a0)
-; RV32-NEXT:    lbu a0, 1(a0)
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v10, v8, 1
-; RV32-NEXT:    vmv.x.s a2, v10
-; RV32-NEXT:    lbu a3, 1(a2)
-; RV32-NEXT:    lbu a2, 0(a2)
-; RV32-NEXT:    slli a0, a0, 8
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a2, a3, a2
-; RV32-NEXT:    vslidedown.vi v10, v8, 2
+; RV32-NEXT:    # implicit-def: $v8
+; RV32-NEXT:    beqz zero, .LBB107_9
+; RV32-NEXT:  # %bb.1: # %else
+; RV32-NEXT:    andi a1, a0, 2
+; RV32-NEXT:    bnez a1, .LBB107_10
+; RV32-NEXT:  .LBB107_2: # %else2
+; RV32-NEXT:    andi a1, a0, 4
+; RV32-NEXT:    bnez a1, .LBB107_11
+; RV32-NEXT:  .LBB107_3: # %else5
+; RV32-NEXT:    andi a1, a0, 8
+; RV32-NEXT:    bnez a1, .LBB107_12
+; RV32-NEXT:  .LBB107_4: # %else8
+; RV32-NEXT:    andi a1, a0, 16
+; RV32-NEXT:    bnez a1, .LBB107_13
+; RV32-NEXT:  .LBB107_5: # %else11
+; RV32-NEXT:    andi a1, a0, 32
+; RV32-NEXT:    bnez a1, .LBB107_14
+; RV32-NEXT:  .LBB107_6: # %else14
+; RV32-NEXT:    andi a1, a0, 64
+; RV32-NEXT:    bnez a1, .LBB107_15
+; RV32-NEXT:  .LBB107_7: # %else17
+; RV32-NEXT:    andi a0, a0, -128
+; RV32-NEXT:    bnez a0, .LBB107_16
+; RV32-NEXT:  .LBB107_8: # %else20
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB107_9: # %cond.load
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; RV32-NEXT:    vmv.x.s a1, v10
-; RV32-NEXT:    lbu a3, 0(a1)
-; RV32-NEXT:    lbu a1, 1(a1)
-; RV32-NEXT:    vslidedown.vi v10, v8, 3
-; RV32-NEXT:    vmv.x.s a4, v10
-; RV32-NEXT:    lbu a5, 1(a4)
-; RV32-NEXT:    lbu a4, 0(a4)
-; RV32-NEXT:    slli a1, a1, 8
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    slli a5, a5, 8
-; RV32-NEXT:    or a4, a5, a4
+; RV32-NEXT:    lbu a2, 1(a1)
+; RV32-NEXT:    lbu a1, 0(a1)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    andi a1, a0, 2
+; RV32-NEXT:    beqz a1, .LBB107_2
+; RV32-NEXT:  .LBB107_10: # %cond.load1
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v10, 1
+; RV32-NEXT:    vmv.x.s a1, v9
+; RV32-NEXT:    lbu a2, 1(a1)
+; RV32-NEXT:    lbu a1, 0(a1)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    vmv.s.x v9, a1
+; RV32-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 1
+; RV32-NEXT:    andi a1, a0, 4
+; RV32-NEXT:    beqz a1, .LBB107_3
+; RV32-NEXT:  .LBB107_11: # %cond.load4
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v10, 2
+; RV32-NEXT:    vmv.x.s a1, v9
+; RV32-NEXT:    lbu a2, 1(a1)
+; RV32-NEXT:    lbu a1, 0(a1)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    vmv.s.x v9, a1
+; RV32-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 2
+; RV32-NEXT:    andi a1, a0, 8
+; RV32-NEXT:    beqz a1, .LBB107_4
+; RV32-NEXT:  .LBB107_12: # %cond.load7
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v10, 3
+; RV32-NEXT:    vmv.x.s a1, v9
+; RV32-NEXT:    lbu a2, 1(a1)
+; RV32-NEXT:    lbu a1, 0(a1)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    vmv.s.x v9, a1
+; RV32-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 3
+; RV32-NEXT:    andi a1, a0, 16
+; RV32-NEXT:    beqz a1, .LBB107_5
+; RV32-NEXT:  .LBB107_13: # %cond.load10
 ; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v10, v8, 4
-; RV32-NEXT:    vmv.x.s a3, v10
-; RV32-NEXT:    lbu a5, 0(a3)
-; RV32-NEXT:    lbu a3, 1(a3)
-; RV32-NEXT:    vslidedown.vi v10, v8, 5
-; RV32-NEXT:    vmv.x.s a6, v10
-; RV32-NEXT:    lbu a7, 1(a6)
-; RV32-NEXT:    lbu a6, 0(a6)
-; RV32-NEXT:    slli a3, a3, 8
-; RV32-NEXT:    or a3, a3, a5
-; RV32-NEXT:    slli a7, a7, 8
-; RV32-NEXT:    or a5, a7, a6
-; RV32-NEXT:    vslidedown.vi v10, v8, 6
-; RV32-NEXT:    vmv.x.s a6, v10
-; RV32-NEXT:    lbu a7, 0(a6)
-; RV32-NEXT:    lbu a6, 1(a6)
-; RV32-NEXT:    vslidedown.vi v8, v8, 7
-; RV32-NEXT:    vmv.x.s t0, v8
-; RV32-NEXT:    lbu t1, 1(t0)
-; RV32-NEXT:    lbu t0, 0(t0)
-; RV32-NEXT:    slli a6, a6, 8
-; RV32-NEXT:    or a6, a6, a7
-; RV32-NEXT:    slli t1, t1, 8
-; RV32-NEXT:    or a7, t1, t0
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v9, v8, a4
-; RV32-NEXT:    vmv.v.x v8, a3
-; RV32-NEXT:    vslide1down.vx v8, v8, a5
-; RV32-NEXT:    vslide1down.vx v8, v8, a6
-; RV32-NEXT:    vmv.v.i v0, 15
-; RV32-NEXT:    vslide1down.vx v8, v8, a7
-; RV32-NEXT:    vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT:    vslidedown.vi v12, v10, 4
+; RV32-NEXT:    vmv.x.s a1, v12
+; RV32-NEXT:    lbu a2, 1(a1)
+; RV32-NEXT:    lbu a1, 0(a1)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    vmv.s.x v9, a1
+; RV32-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 4
+; RV32-NEXT:    andi a1, a0, 32
+; RV32-NEXT:    beqz a1, .LBB107_6
+; RV32-NEXT:  .LBB107_14: # %cond.load13
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v10, 5
+; RV32-NEXT:    vmv.x.s a1, v12
+; RV32-NEXT:    lbu a2, 1(a1)
+; RV32-NEXT:    lbu a1, 0(a1)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    vmv.s.x v9, a1
+; RV32-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 5
+; RV32-NEXT:    andi a1, a0, 64
+; RV32-NEXT:    beqz a1, .LBB107_7
+; RV32-NEXT:  .LBB107_15: # %cond.load16
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v12, v10, 6
+; RV32-NEXT:    vmv.x.s a1, v12
+; RV32-NEXT:    lbu a2, 1(a1)
+; RV32-NEXT:    lbu a1, 0(a1)
+; RV32-NEXT:    slli a2, a2, 8
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    vmv.s.x v9, a1
+; RV32-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 6
+; RV32-NEXT:    andi a0, a0, -128
+; RV32-NEXT:    beqz a0, .LBB107_8
+; RV32-NEXT:  .LBB107_16: # %cond.load19
+; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v10, 7
+; RV32-NEXT:    vmv.x.s a0, v10
+; RV32-NEXT:    lbu a1, 1(a0)
+; RV32-NEXT:    lbu a0, 0(a0)
+; RV32-NEXT:    slli a1, a1, 8
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vslideup.vi v8, v9, 7
 ; RV32-NEXT:    ret
 ;
 ; RV64V-LABEL: mgather_strided_unaligned:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    addi sp, sp, -128
-; RV64V-NEXT:    .cfi_def_cfa_offset 128
-; RV64V-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; RV64V-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64V-NEXT:    vmset.m v8
+; RV64V-NEXT:    vid.v v12
+; RV64V-NEXT:    vsll.vi v12, v12, 2
+; RV64V-NEXT:    vadd.vx v12, v12, a0
+; RV64V-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; RV64V-NEXT:    vmv.x.s a0, v8
+; RV64V-NEXT:    # implicit-def: $v8
+; RV64V-NEXT:    beqz zero, .LBB107_11
+; RV64V-NEXT:  # %bb.1: # %else
+; RV64V-NEXT:    andi a1, a0, 2
+; RV64V-NEXT:    bnez a1, .LBB107_12
+; RV64V-NEXT:  .LBB107_2: # %else2
+; RV64V-NEXT:    andi a1, a0, 4
+; RV64V-NEXT:    bnez a1, .LBB107_13
+; RV64V-NEXT:  .LBB107_3: # %else5
+; RV64V-NEXT:    andi a1, a0, 8
+; RV64V-NEXT:    beqz a1, .LBB107_5
+; RV64V-NEXT:  .LBB107_4: # %cond.load7
+; RV64V-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV64V-NEXT:    vslidedown.vi v10, v12, 3
+; RV64V-NEXT:    vmv.x.s a1, v10
+; RV64V-NEXT:    lbu a2, 1(a1)
+; RV64V-NEXT:    lbu a1, 0(a1)
+; RV64V-NEXT:    slli a2, a2, 8
+; RV64V-NEXT:    or a1, a2, a1
+; RV64V-NEXT:    vmv.s.x v9, a1
+; RV64V-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64V-NEXT:    vslideup.vi v8, v9, 3
+; RV64V-NEXT:  .LBB107_5: # %else8
+; RV64V-NEXT:    addi sp, sp, -320
+; RV64V-NEXT:    .cfi_def_cfa_offset 320
+; RV64V-NEXT:    sd ra, 312(sp) # 8-byte Folded Spill
+; RV64V-NEXT:    sd s0, 304(sp) # 8-byte Folded Spill
 ; RV64V-NEXT:    .cfi_offset ra, -8
 ; RV64V-NEXT:    .cfi_offset s0, -16
-; RV64V-NEXT:    addi s0, sp, 128
+; RV64V-NEXT:    addi s0, sp, 320
 ; RV64V-NEXT:    .cfi_def_cfa s0, 0
 ; RV64V-NEXT:    andi sp, sp, -64
+; RV64V-NEXT:    andi a1, a0, 16
+; RV64V-NEXT:    bnez a1, .LBB107_14
+; RV64V-NEXT:  # %bb.6: # %else11
+; RV64V-NEXT:    andi a1, a0, 32
+; RV64V-NEXT:    bnez a1, .LBB107_15
+; RV64V-NEXT:  .LBB107_7: # %else14
+; RV64V-NEXT:    andi a1, a0, 64
+; RV64V-NEXT:    bnez a1, .LBB107_16
+; RV64V-NEXT:  .LBB107_8: # %else17
+; RV64V-NEXT:    andi a0, a0, -128
+; RV64V-NEXT:    beqz a0, .LBB107_10
+; RV64V-NEXT:  .LBB107_9: # %cond.load19
+; RV64V-NEXT:    mv a0, sp
 ; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64V-NEXT:    vid.v v8
-; RV64V-NEXT:    vsll.vi v8, v8, 2
-; RV64V-NEXT:    vadd.vx v8, v8, a0
-; RV64V-NEXT:    vmv.x.s a0, v8
-; RV64V-NEXT:    lbu a1, 0(a0)
-; RV64V-NEXT:    lbu a0, 1(a0)
+; RV64V-NEXT:    vse64.v v12, (a0)
+; RV64V-NEXT:    ld a0, 56(sp)
+; RV64V-NEXT:    lbu a1, 1(a0)
+; RV64V-NEXT:    lbu a0, 0(a0)
+; RV64V-NEXT:    slli a1, a1, 8
+; RV64V-NEXT:    or a0, a1, a0
+; RV64V-NEXT:    vmv.s.x v9, a0
+; RV64V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64V-NEXT:    vslideup.vi v8, v9, 7
+; RV64V-NEXT:  .LBB107_10: # %else20
+; RV64V-NEXT:    addi sp, s0, -320
+; RV64V-NEXT:    ld ra, 312(sp) # 8-byte Folded Reload
+; RV64V-NEXT:    ld s0, 304(sp) # 8-byte Folded Reload
+; RV64V-NEXT:    addi sp, sp, 320
+; RV64V-NEXT:    ret
+; RV64V-NEXT:  .LBB107_11: # %cond.load
+; RV64V-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64V-NEXT:    vmv.x.s a1, v12
+; RV64V-NEXT:    lbu a2, 1(a1)
+; RV64V-NEXT:    lbu a1, 0(a1)
+; RV64V-NEXT:    slli a2, a2, 8
+; RV64V-NEXT:    or a1, a2, a1
+; RV64V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64V-NEXT:    vmv.v.x v8, a1
+; RV64V-NEXT:    andi a1, a0, 2
+; RV64V-NEXT:    beqz a1, .LBB107_2
+; RV64V-NEXT:  .LBB107_12: # %cond.load1
 ; RV64V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64V-NEXT:    vslidedown.vi v12, v8, 1
-; RV64V-NEXT:    vmv.x.s a2, v12
-; RV64V-NEXT:    lbu a3, 1(a2)
-; RV64V-NEXT:    lbu a2, 0(a2)
-; RV64V-NEXT:    slli a0, a0, 8
-; RV64V-NEXT:    or a0, a0, a1
-; RV64V-NEXT:    slli a1, a3, 8
-; RV64V-NEXT:    or a1, a1, a2
+; RV64V-NEXT:    vslidedown.vi v9, v12, 1
+; RV64V-NEXT:    vmv.x.s a1, v9
+; RV64V-NEXT:    lbu a2, 1(a1)
+; RV64V-NEXT:    lbu a1, 0(a1)
+; RV64V-NEXT:    slli a2, a2, 8
+; RV64V-NEXT:    or a1, a2, a1
+; RV64V-NEXT:    vmv.s.x v9, a1
+; RV64V-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64V-NEXT:    vslideup.vi v8, v9, 1
+; RV64V-NEXT:    andi a1, a0, 4
+; RV64V-NEXT:    beqz a1, .LBB107_3
+; RV64V-NEXT:  .LBB107_13: # %cond.load4
 ; RV64V-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64V-NEXT:    vslidedown.vi v12, v8, 2
-; RV64V-NEXT:    vmv.x.s a2, v12
-; RV64V-NEXT:    lbu a3, 0(a2)
-; RV64V-NEXT:    lbu a2, 1(a2)
-; RV64V-NEXT:    vslidedown.vi v12, v8, 3
-; RV64V-NEXT:    vmv.x.s a4, v12
-; RV64V-NEXT:    lbu a5, 0(a4)
-; RV64V-NEXT:    lbu a4, 1(a4)
-; RV64V-NEXT:    mv a6, sp
-; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV64V-NEXT:    vse64.v v8, (a6)
-; RV64V-NEXT:    ld a6, 32(sp)
+; RV64V-NEXT:    vslidedown.vi v10, v12, 2
+; RV64V-NEXT:    vmv.x.s a1, v10
+; RV64V-NEXT:    lbu a2, 1(a1)
+; RV64V-NEXT:    lbu a1, 0(a1)
 ; RV64V-NEXT:    slli a2, a2, 8
-; RV64V-NEXT:    or a2, a2, a3
-; RV64V-NEXT:    slli a4, a4, 8
-; RV64V-NEXT:    lbu a3, 1(a6)
-; RV64V-NEXT:    ld a7, 40(sp)
-; RV64V-NEXT:    lbu a6, 0(a6)
-; RV64V-NEXT:    or a4, a4, a5
-; RV64V-NEXT:    slli a3, a3, 8
-; RV64V-NEXT:    lbu a5, 1(a7)
-; RV64V-NEXT:    or a3, a3, a6
-; RV64V-NEXT:    lbu a6, 0(a7)
-; RV64V-NEXT:    ld a7, 48(sp)
-; RV64V-NEXT:    slli a5, a5, 8
-; RV64V-NEXT:    ld t0, 56(sp)
-; RV64V-NEXT:    or a5, a5, a6
-; RV64V-NEXT:    lbu a6, 1(a7)
-; RV64V-NEXT:    lbu a7, 0(a7)
-; RV64V-NEXT:    lbu t1, 1(t0)
-; RV64V-NEXT:    lbu t0, 0(t0)
-; RV64V-NEXT:    slli a6, a6, 8
-; RV64V-NEXT:    or a6, a6, a7
-; RV64V-NEXT:    slli t1, t1, 8
-; RV64V-NEXT:    or a7, t1, t0
-; RV64V-NEXT:    vmv.v.x v8, a0
-; RV64V-NEXT:    vslide1down.vx v8, v8, a1
-; RV64V-NEXT:    vslide1down.vx v8, v8, a2
-; RV64V-NEXT:    vslide1down.vx v9, v8, a4
-; RV64V-NEXT:    vmv.v.x v8, a3
-; RV64V-NEXT:    vslide1down.vx v8, v8, a5
-; RV64V-NEXT:    vslide1down.vx v8, v8, a6
-; RV64V-NEXT:    vmv.v.i v0, 15
-; RV64V-NEXT:    vslide1down.vx v8, v8, a7
-; RV64V-NEXT:    vslidedown.vi v8, v9, 4, v0.t
-; RV64V-NEXT:    addi sp, s0, -128
-; RV64V-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; RV64V-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64V-NEXT:    addi sp, sp, 128
-; RV64V-NEXT:    ret
+; RV64V-NEXT:    or a1, a2, a1
+; RV64V-NEXT:    vmv.s.x v9, a1
+; RV64V-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64V-NEXT:    vslideup.vi v8, v9, 2
+; RV64V-NEXT:    andi a1, a0, 8
+; RV64V-NEXT:    bnez a1, .LBB107_4
+; RV64V-NEXT:    j .LBB107_5
+; RV64V-NEXT:  .LBB107_14: # %cond.load10
+; RV64V-NEXT:    addi a1, sp, 192
+; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64V-NEXT:    vse64.v v12, (a1)
+; RV64V-NEXT:    ld a1, 224(sp)
+; RV64V-NEXT:    lbu a2, 1(a1)
+; RV64V-NEXT:    lbu a1, 0(a1)
+; RV64V-NEXT:    slli a2, a2, 8
+; RV64V-NEXT:    or a1, a2, a1
+; RV64V-NEXT:    vmv.s.x v9, a1
+; RV64V-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64V-NEXT:    vslideup.vi v8, v9, 4
+; RV64V-NEXT:    andi a1, a0, 32
+; RV64V-NEXT:    beqz a1, .LBB107_7
+; RV64V-NEXT:  .LBB107_15: # %cond.load13
+; RV64V-NEXT:    addi a1, sp, 128
+; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64V-NEXT:    vse64.v v12, (a1)
+; RV64V-NEXT:    ld a1, 168(sp)
+; RV64V-NEXT:    lbu a2, 1(a1)
+; RV64V-NEXT:    lbu a1, 0(a1)
+; RV64V-NEXT:    slli a2, a2, 8
+; RV64V-NEXT:    or a1, a2, a1
+; RV64V-NEXT:    vmv.s.x v9, a1
+; RV64V-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64V-NEXT:    vslideup.vi v8, v9, 5
+; RV64V-NEXT:    andi a1, a0, 64
+; RV64V-NEXT:    beqz a1, .LBB107_8
+; RV64V-NEXT:  .LBB107_16: # %cond.load16
+; RV64V-NEXT:    addi a1, sp, 64
+; RV64V-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64V-NEXT:    vse64.v v12, (a1)
+; RV64V-NEXT:    ld a1, 112(sp)
+; RV64V-NEXT:    lbu a2, 1(a1)
+; RV64V-NEXT:    lbu a1, 0(a1)
+; RV64V-NEXT:    slli a2, a2, 8
+; RV64V-NEXT:    or a1, a2, a1
+; RV64V-NEXT:    vmv.s.x v9, a1
+; RV64V-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64V-NEXT:    vslideup.vi v8, v9, 6
+; RV64V-NEXT:    andi a0, a0, -128
+; RV64V-NEXT:    bnez a0, .LBB107_9
+; RV64V-NEXT:    j .LBB107_10
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_unaligned:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lbu a1, 1(a0)
-; RV64ZVE32F-NEXT:    lbu a2, 0(a0)
-; RV64ZVE32F-NEXT:    lbu a3, 5(a0)
-; RV64ZVE32F-NEXT:    lbu a4, 4(a0)
-; RV64ZVE32F-NEXT:    slli a1, a1, 8
-; RV64ZVE32F-NEXT:    or a1, a1, a2
-; RV64ZVE32F-NEXT:    slli a3, a3, 8
-; RV64ZVE32F-NEXT:    or a3, a3, a4
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB107_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_10
+; RV64ZVE32F-NEXT:  .LBB107_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_11
+; RV64ZVE32F-NEXT:  .LBB107_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_12
+; RV64ZVE32F-NEXT:  .LBB107_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_13
+; RV64ZVE32F-NEXT:  .LBB107_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_14
+; RV64ZVE32F-NEXT:  .LBB107_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_15
+; RV64ZVE32F-NEXT:  .LBB107_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB107_16
+; RV64ZVE32F-NEXT:  .LBB107_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB107_9: # %cond.load
+; RV64ZVE32F-NEXT:    lbu a2, 1(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 0(a0)
+; RV64ZVE32F-NEXT:    slli a2, a2, 8
+; RV64ZVE32F-NEXT:    or a2, a2, a3
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.v.x v8, a2
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_2
+; RV64ZVE32F-NEXT:  .LBB107_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lbu a2, 5(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 4(a0)
+; RV64ZVE32F-NEXT:    slli a2, a2, 8
+; RV64ZVE32F-NEXT:    or a2, a2, a3
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_3
+; RV64ZVE32F-NEXT:  .LBB107_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    lbu a2, 9(a0)
-; RV64ZVE32F-NEXT:    lbu a4, 8(a0)
-; RV64ZVE32F-NEXT:    lbu a5, 13(a0)
-; RV64ZVE32F-NEXT:    lbu a6, 12(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 8(a0)
+; RV64ZVE32F-NEXT:    slli a2, a2, 8
+; RV64ZVE32F-NEXT:    or a2, a2, a3
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_4
+; RV64ZVE32F-NEXT:  .LBB107_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lbu a2, 13(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 12(a0)
+; RV64ZVE32F-NEXT:    slli a2, a2, 8
+; RV64ZVE32F-NEXT:    or a2, a2, a3
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_5
+; RV64ZVE32F-NEXT:  .LBB107_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lbu a2, 17(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 16(a0)
+; RV64ZVE32F-NEXT:    slli a2, a2, 8
+; RV64ZVE32F-NEXT:    or a2, a2, a3
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_6
+; RV64ZVE32F-NEXT:  .LBB107_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lbu a2, 21(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 20(a0)
+; RV64ZVE32F-NEXT:    slli a2, a2, 8
+; RV64ZVE32F-NEXT:    or a2, a2, a3
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_7
+; RV64ZVE32F-NEXT:  .LBB107_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lbu a2, 25(a0)
+; RV64ZVE32F-NEXT:    lbu a3, 24(a0)
 ; RV64ZVE32F-NEXT:    slli a2, a2, 8
-; RV64ZVE32F-NEXT:    or a2, a2, a4
-; RV64ZVE32F-NEXT:    slli a5, a5, 8
-; RV64ZVE32F-NEXT:    or a4, a5, a6
-; RV64ZVE32F-NEXT:    lbu a5, 17(a0)
-; RV64ZVE32F-NEXT:    lbu a6, 16(a0)
-; RV64ZVE32F-NEXT:    lbu a7, 21(a0)
-; RV64ZVE32F-NEXT:    lbu t0, 20(a0)
-; RV64ZVE32F-NEXT:    slli a5, a5, 8
-; RV64ZVE32F-NEXT:    or a5, a5, a6
-; RV64ZVE32F-NEXT:    slli a7, a7, 8
-; RV64ZVE32F-NEXT:    or a6, a7, t0
-; RV64ZVE32F-NEXT:    lbu a7, 25(a0)
-; RV64ZVE32F-NEXT:    lbu t0, 24(a0)
-; RV64ZVE32F-NEXT:    lbu t1, 29(a0)
+; RV64ZVE32F-NEXT:    or a2, a2, a3
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB107_8
+; RV64ZVE32F-NEXT:  .LBB107_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lbu a1, 29(a0)
 ; RV64ZVE32F-NEXT:    lbu a0, 28(a0)
-; RV64ZVE32F-NEXT:    slli a7, a7, 8
-; RV64ZVE32F-NEXT:    or a7, a7, t0
-; RV64ZVE32F-NEXT:    slli t1, t1, 8
-; RV64ZVE32F-NEXT:    or a0, t1, a0
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV64ZVE32F-NEXT:    vmv.v.x v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v9, v8, a4
-; RV64ZVE32F-NEXT:    vmv.v.x v8, a5
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a7
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a0
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v9, 4, v0.t
+; RV64ZVE32F-NEXT:    slli a1, a1, 8
+; RV64ZVE32F-NEXT:    or a0, a1, a0
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 1, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -12881,27 +13436,91 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_2xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lh a1, 2(a0)
-; RV64ZVE32F-NEXT:    lh a2, 8(a0)
-; RV64ZVE32F-NEXT:    lh a3, 10(a0)
-; RV64ZVE32F-NEXT:    lh a4, 18(a0)
-; RV64ZVE32F-NEXT:    lh a5, 24(a0)
-; RV64ZVE32F-NEXT:    lh a6, 26(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB108_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_10
+; RV64ZVE32F-NEXT:  .LBB108_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_11
+; RV64ZVE32F-NEXT:  .LBB108_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_12
+; RV64ZVE32F-NEXT:  .LBB108_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_13
+; RV64ZVE32F-NEXT:  .LBB108_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_14
+; RV64ZVE32F-NEXT:  .LBB108_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_15
+; RV64ZVE32F-NEXT:  .LBB108_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB108_16
+; RV64ZVE32F-NEXT:  .LBB108_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB108_9: # %cond.load
 ; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 16
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_2
+; RV64ZVE32F-NEXT:  .LBB108_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lh a2, 2(a0)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_3
+; RV64ZVE32F-NEXT:  .LBB108_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_4
+; RV64ZVE32F-NEXT:  .LBB108_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 10(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_5
+; RV64ZVE32F-NEXT:  .LBB108_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 16(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_6
+; RV64ZVE32F-NEXT:  .LBB108_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 18(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_7
+; RV64ZVE32F-NEXT:  .LBB108_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 24(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB108_8
+; RV64ZVE32F-NEXT:  .LBB108_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 26(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -12925,28 +13544,92 @@ define <8 x i16> @mgather_strided_2xSEW_with_offset(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_2xSEW_with_offset:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    addi a1, a0, 4
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB109_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB109_10
+; RV64ZVE32F-NEXT:  .LBB109_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB109_11
+; RV64ZVE32F-NEXT:  .LBB109_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB109_12
+; RV64ZVE32F-NEXT:  .LBB109_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB109_13
+; RV64ZVE32F-NEXT:  .LBB109_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB109_14
+; RV64ZVE32F-NEXT:  .LBB109_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB109_15
+; RV64ZVE32F-NEXT:  .LBB109_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB109_16
+; RV64ZVE32F-NEXT:  .LBB109_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB109_9: # %cond.load
+; RV64ZVE32F-NEXT:    addi a2, a0, 4
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB109_2
+; RV64ZVE32F-NEXT:  .LBB109_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    lh a2, 6(a0)
-; RV64ZVE32F-NEXT:    lh a3, 12(a0)
-; RV64ZVE32F-NEXT:    lh a4, 14(a0)
-; RV64ZVE32F-NEXT:    lh a5, 22(a0)
-; RV64ZVE32F-NEXT:    lh a6, 28(a0)
-; RV64ZVE32F-NEXT:    lh a7, 30(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a1), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 20
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a5
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a7
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB109_3
+; RV64ZVE32F-NEXT:  .LBB109_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 12(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB109_4
+; RV64ZVE32F-NEXT:  .LBB109_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 14(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB109_5
+; RV64ZVE32F-NEXT:  .LBB109_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 20(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB109_6
+; RV64ZVE32F-NEXT:  .LBB109_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 22(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB109_7
+; RV64ZVE32F-NEXT:  .LBB109_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 28(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB109_8
+; RV64ZVE32F-NEXT:  .LBB109_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 30(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 2, i64 3, i64 6, i64 7, i64 10, i64 11, i64 14, i64 15>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -12970,73 +13653,201 @@ define <8 x i16> @mgather_reverse_unit_strided_2xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_reverse_unit_strided_2xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    addi a1, a0, 28
-; RV64ZVE32F-NEXT:    lh a2, 30(a0)
-; RV64ZVE32F-NEXT:    lh a3, 24(a0)
-; RV64ZVE32F-NEXT:    lh a4, 26(a0)
-; RV64ZVE32F-NEXT:    lh a5, 22(a0)
-; RV64ZVE32F-NEXT:    lh a6, 16(a0)
-; RV64ZVE32F-NEXT:    lh a7, 18(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a1), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 20
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a5
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a7
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB110_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_10
+; RV64ZVE32F-NEXT:  .LBB110_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_11
+; RV64ZVE32F-NEXT:  .LBB110_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_12
+; RV64ZVE32F-NEXT:  .LBB110_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_13
+; RV64ZVE32F-NEXT:  .LBB110_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_14
+; RV64ZVE32F-NEXT:  .LBB110_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB110_15
+; RV64ZVE32F-NEXT:  .LBB110_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB110_16
+; RV64ZVE32F-NEXT:  .LBB110_8: # %else20
 ; RV64ZVE32F-NEXT:    ret
-  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 14, i64 15, i64 12, i64 13, i64 10, i64 11, i64 8, i64 9>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
-  ret <8 x i16> %v
-}
-
-; TODO: Recognize as strided load with SEW=32
-define <8 x i16> @mgather_reverse_strided_2xSEW(ptr %base) {
-; RV32-LABEL: mgather_reverse_strided_2xSEW:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, 28
-; RV32-NEXT:    li a1, -8
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vlse32.v v8, (a0), a1
-; RV32-NEXT:    ret
-;
-; RV64V-LABEL: mgather_reverse_strided_2xSEW:
-; RV64V:       # %bb.0:
-; RV64V-NEXT:    addi a0, a0, 28
-; RV64V-NEXT:    li a1, -8
-; RV64V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64V-NEXT:    vlse32.v v8, (a0), a1
-; RV64V-NEXT:    ret
-;
-; RV64ZVE32F-LABEL: mgather_reverse_strided_2xSEW:
-; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    addi a1, a0, 28
+; RV64ZVE32F-NEXT:  .LBB110_9: # %cond.load
+; RV64ZVE32F-NEXT:    addi a2, a0, 28
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_2
+; RV64ZVE32F-NEXT:  .LBB110_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    lh a2, 30(a0)
-; RV64ZVE32F-NEXT:    lh a3, 20(a0)
-; RV64ZVE32F-NEXT:    lh a4, 22(a0)
-; RV64ZVE32F-NEXT:    lh a5, 14(a0)
-; RV64ZVE32F-NEXT:    lh a6, 4(a0)
-; RV64ZVE32F-NEXT:    lh a7, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a1), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 12
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a5
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a7
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_3
+; RV64ZVE32F-NEXT:  .LBB110_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 24(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_4
+; RV64ZVE32F-NEXT:  .LBB110_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 26(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_5
+; RV64ZVE32F-NEXT:  .LBB110_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 20(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_6
+; RV64ZVE32F-NEXT:  .LBB110_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 22(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB110_7
+; RV64ZVE32F-NEXT:  .LBB110_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 16(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB110_8
+; RV64ZVE32F-NEXT:  .LBB110_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 18(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 14, i64 15, i64 12, i64 13, i64 10, i64 11, i64 8, i64 9>
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
+  ret <8 x i16> %v
+}
+
+; TODO: Recognize as strided load with SEW=32
+define <8 x i16> @mgather_reverse_strided_2xSEW(ptr %base) {
+; RV32-LABEL: mgather_reverse_strided_2xSEW:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi a0, a0, 28
+; RV32-NEXT:    li a1, -8
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vlse32.v v8, (a0), a1
+; RV32-NEXT:    ret
+;
+; RV64V-LABEL: mgather_reverse_strided_2xSEW:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    addi a0, a0, 28
+; RV64V-NEXT:    li a1, -8
+; RV64V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64V-NEXT:    vlse32.v v8, (a0), a1
+; RV64V-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mgather_reverse_strided_2xSEW:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB111_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_10
+; RV64ZVE32F-NEXT:  .LBB111_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_11
+; RV64ZVE32F-NEXT:  .LBB111_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_12
+; RV64ZVE32F-NEXT:  .LBB111_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_13
+; RV64ZVE32F-NEXT:  .LBB111_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_14
+; RV64ZVE32F-NEXT:  .LBB111_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB111_15
+; RV64ZVE32F-NEXT:  .LBB111_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB111_16
+; RV64ZVE32F-NEXT:  .LBB111_8: # %else20
 ; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB111_9: # %cond.load
+; RV64ZVE32F-NEXT:    addi a2, a0, 28
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_2
+; RV64ZVE32F-NEXT:  .LBB111_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lh a2, 30(a0)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_3
+; RV64ZVE32F-NEXT:  .LBB111_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 20(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_4
+; RV64ZVE32F-NEXT:  .LBB111_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 22(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_5
+; RV64ZVE32F-NEXT:  .LBB111_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 12(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_6
+; RV64ZVE32F-NEXT:  .LBB111_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 14(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB111_7
+; RV64ZVE32F-NEXT:  .LBB111_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 4(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB111_8
+; RV64ZVE32F-NEXT:  .LBB111_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 14, i64 15, i64 10, i64 11, i64 6, i64 7, i64 2, i64 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13059,27 +13870,91 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_2xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lh a1, 2(a0)
-; RV64ZVE32F-NEXT:    lh a2, 16(a0)
-; RV64ZVE32F-NEXT:    lh a3, 18(a0)
-; RV64ZVE32F-NEXT:    lh a4, 10(a0)
-; RV64ZVE32F-NEXT:    lh a5, 4(a0)
-; RV64ZVE32F-NEXT:    lh a6, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB112_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB112_10
+; RV64ZVE32F-NEXT:  .LBB112_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB112_11
+; RV64ZVE32F-NEXT:  .LBB112_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB112_12
+; RV64ZVE32F-NEXT:  .LBB112_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB112_13
+; RV64ZVE32F-NEXT:  .LBB112_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB112_14
+; RV64ZVE32F-NEXT:  .LBB112_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB112_15
+; RV64ZVE32F-NEXT:  .LBB112_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB112_16
+; RV64ZVE32F-NEXT:  .LBB112_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB112_9: # %cond.load
 ; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 8
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB112_2
+; RV64ZVE32F-NEXT:  .LBB112_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lh a2, 2(a0)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB112_3
+; RV64ZVE32F-NEXT:  .LBB112_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 16(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB112_4
+; RV64ZVE32F-NEXT:  .LBB112_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 18(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB112_5
+; RV64ZVE32F-NEXT:  .LBB112_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB112_6
+; RV64ZVE32F-NEXT:  .LBB112_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 10(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB112_7
+; RV64ZVE32F-NEXT:  .LBB112_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 4(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB112_8
+; RV64ZVE32F-NEXT:  .LBB112_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 2, i32 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13105,27 +13980,91 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_2xSEW_unaligned:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lh a1, 2(a0)
-; RV64ZVE32F-NEXT:    lh a2, 18(a0)
-; RV64ZVE32F-NEXT:    lh a3, 20(a0)
-; RV64ZVE32F-NEXT:    lh a4, 10(a0)
-; RV64ZVE32F-NEXT:    lh a5, 4(a0)
-; RV64ZVE32F-NEXT:    lh a6, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB113_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB113_10
+; RV64ZVE32F-NEXT:  .LBB113_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB113_11
+; RV64ZVE32F-NEXT:  .LBB113_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB113_12
+; RV64ZVE32F-NEXT:  .LBB113_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB113_13
+; RV64ZVE32F-NEXT:  .LBB113_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB113_14
+; RV64ZVE32F-NEXT:  .LBB113_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB113_15
+; RV64ZVE32F-NEXT:  .LBB113_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB113_16
+; RV64ZVE32F-NEXT:  .LBB113_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB113_9: # %cond.load
 ; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 8
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB113_2
+; RV64ZVE32F-NEXT:  .LBB113_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lh a2, 2(a0)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB113_3
+; RV64ZVE32F-NEXT:  .LBB113_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 18(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB113_4
+; RV64ZVE32F-NEXT:  .LBB113_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 20(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB113_5
+; RV64ZVE32F-NEXT:  .LBB113_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB113_6
+; RV64ZVE32F-NEXT:  .LBB113_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 10(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB113_7
+; RV64ZVE32F-NEXT:  .LBB113_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 4(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB113_8
+; RV64ZVE32F-NEXT:  .LBB113_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 9, i32 10, i32 4, i32 5, i32 2, i32 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13152,27 +14091,92 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned2(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_2xSEW_unaligned2:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    addi a1, a0, 2
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB114_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB114_10
+; RV64ZVE32F-NEXT:  .LBB114_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB114_11
+; RV64ZVE32F-NEXT:  .LBB114_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB114_12
+; RV64ZVE32F-NEXT:  .LBB114_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB114_13
+; RV64ZVE32F-NEXT:  .LBB114_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB114_14
+; RV64ZVE32F-NEXT:  .LBB114_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB114_15
+; RV64ZVE32F-NEXT:  .LBB114_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB114_16
+; RV64ZVE32F-NEXT:  .LBB114_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB114_9: # %cond.load
+; RV64ZVE32F-NEXT:    addi a2, a0, 2
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB114_2
+; RV64ZVE32F-NEXT:  .LBB114_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    lh a3, 18(a0)
-; RV64ZVE32F-NEXT:    lh a4, 20(a0)
-; RV64ZVE32F-NEXT:    lh a5, 10(a0)
-; RV64ZVE32F-NEXT:    lh a6, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a1), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 8
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a5
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB114_3
+; RV64ZVE32F-NEXT:  .LBB114_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 18(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB114_4
+; RV64ZVE32F-NEXT:  .LBB114_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 20(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB114_5
+; RV64ZVE32F-NEXT:  .LBB114_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB114_6
+; RV64ZVE32F-NEXT:  .LBB114_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 10(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB114_7
+; RV64ZVE32F-NEXT:  .LBB114_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 4(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB114_8
+; RV64ZVE32F-NEXT:  .LBB114_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 1, i32 2, i32 9, i32 10, i32 4, i32 5, i32 2, i32 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13202,27 +14206,91 @@ define <8 x i16> @mgather_gather_4xSEW(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_4xSEW:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lh a1, 2(a0)
-; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    lh a3, 6(a0)
-; RV64ZVE32F-NEXT:    lh a4, 18(a0)
-; RV64ZVE32F-NEXT:    lh a5, 20(a0)
-; RV64ZVE32F-NEXT:    lh a6, 22(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB115_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB115_10
+; RV64ZVE32F-NEXT:  .LBB115_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB115_11
+; RV64ZVE32F-NEXT:  .LBB115_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB115_12
+; RV64ZVE32F-NEXT:  .LBB115_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB115_13
+; RV64ZVE32F-NEXT:  .LBB115_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB115_14
+; RV64ZVE32F-NEXT:  .LBB115_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB115_15
+; RV64ZVE32F-NEXT:  .LBB115_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB115_16
+; RV64ZVE32F-NEXT:  .LBB115_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB115_9: # %cond.load
 ; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 16
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB115_2
+; RV64ZVE32F-NEXT:  .LBB115_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lh a2, 2(a0)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB115_3
+; RV64ZVE32F-NEXT:  .LBB115_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 4(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB115_4
+; RV64ZVE32F-NEXT:  .LBB115_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB115_5
+; RV64ZVE32F-NEXT:  .LBB115_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 16(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB115_6
+; RV64ZVE32F-NEXT:  .LBB115_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 18(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB115_7
+; RV64ZVE32F-NEXT:  .LBB115_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 20(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB115_8
+; RV64ZVE32F-NEXT:  .LBB115_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 22(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 8, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 8, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13249,27 +14317,91 @@ define <8 x i16> @mgather_gather_4xSEW_partial_align(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_4xSEW_partial_align:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lh a1, 2(a0)
-; RV64ZVE32F-NEXT:    lh a2, 4(a0)
-; RV64ZVE32F-NEXT:    lh a3, 6(a0)
-; RV64ZVE32F-NEXT:    lh a4, 18(a0)
-; RV64ZVE32F-NEXT:    lh a5, 20(a0)
-; RV64ZVE32F-NEXT:    lh a6, 22(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB116_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB116_10
+; RV64ZVE32F-NEXT:  .LBB116_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB116_11
+; RV64ZVE32F-NEXT:  .LBB116_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB116_12
+; RV64ZVE32F-NEXT:  .LBB116_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB116_13
+; RV64ZVE32F-NEXT:  .LBB116_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB116_14
+; RV64ZVE32F-NEXT:  .LBB116_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB116_15
+; RV64ZVE32F-NEXT:  .LBB116_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB116_16
+; RV64ZVE32F-NEXT:  .LBB116_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB116_9: # %cond.load
 ; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 16
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB116_2
+; RV64ZVE32F-NEXT:  .LBB116_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lh a2, 2(a0)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB116_3
+; RV64ZVE32F-NEXT:  .LBB116_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 4(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB116_4
+; RV64ZVE32F-NEXT:  .LBB116_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB116_5
+; RV64ZVE32F-NEXT:  .LBB116_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 16(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB116_6
+; RV64ZVE32F-NEXT:  .LBB116_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 18(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB116_7
+; RV64ZVE32F-NEXT:  .LBB116_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 20(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB116_8
+; RV64ZVE32F-NEXT:  .LBB116_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 22(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i32>  <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13281,8 +14413,10 @@ define <8 x i16> @mgather_shuffle_reverse(ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13305,27 +14439,92 @@ define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_shuffle_rotate:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lh a1, 10(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB118_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB118_10
+; RV64ZVE32F-NEXT:  .LBB118_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB118_11
+; RV64ZVE32F-NEXT:  .LBB118_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB118_12
+; RV64ZVE32F-NEXT:  .LBB118_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB118_13
+; RV64ZVE32F-NEXT:  .LBB118_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB118_14
+; RV64ZVE32F-NEXT:  .LBB118_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB118_15
+; RV64ZVE32F-NEXT:  .LBB118_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB118_16
+; RV64ZVE32F-NEXT:  .LBB118_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB118_9: # %cond.load
+; RV64ZVE32F-NEXT:    addi a2, a0, 8
+; RV64ZVE32F-NEXT:    vlse16.v v8, (a2), zero
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB118_2
+; RV64ZVE32F-NEXT:  .LBB118_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lh a2, 10(a0)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB118_3
+; RV64ZVE32F-NEXT:  .LBB118_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    lh a2, 12(a0)
-; RV64ZVE32F-NEXT:    lh a3, 14(a0)
-; RV64ZVE32F-NEXT:    lh a4, 2(a0)
-; RV64ZVE32F-NEXT:    lh a5, 4(a0)
-; RV64ZVE32F-NEXT:    lh a6, 6(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 8
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a2
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v9, v9, a3
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v9, 4, v0.t
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB118_4
+; RV64ZVE32F-NEXT:  .LBB118_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 14(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB118_5
+; RV64ZVE32F-NEXT:  .LBB118_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB118_6
+; RV64ZVE32F-NEXT:  .LBB118_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 2(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB118_7
+; RV64ZVE32F-NEXT:  .LBB118_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 4(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB118_8
+; RV64ZVE32F-NEXT:  .LBB118_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 4, i64 5, i64 6, i64 7, i64 0, i64 1, i64 2, i64 3>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13352,27 +14551,91 @@ define <8 x i16> @mgather_shuffle_vrgather(ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mgather_shuffle_vrgather:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    lh a1, 4(a0)
-; RV64ZVE32F-NEXT:    lh a2, 6(a0)
-; RV64ZVE32F-NEXT:    lh a3, 2(a0)
-; RV64ZVE32F-NEXT:    lh a4, 10(a0)
-; RV64ZVE32F-NEXT:    lh a5, 12(a0)
-; RV64ZVE32F-NEXT:    lh a6, 14(a0)
-; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB119_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB119_10
+; RV64ZVE32F-NEXT:  .LBB119_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB119_11
+; RV64ZVE32F-NEXT:  .LBB119_3: # %else5
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB119_12
+; RV64ZVE32F-NEXT:  .LBB119_4: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB119_13
+; RV64ZVE32F-NEXT:  .LBB119_5: # %else11
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB119_14
+; RV64ZVE32F-NEXT:  .LBB119_6: # %else14
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB119_15
+; RV64ZVE32F-NEXT:  .LBB119_7: # %else17
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB119_16
+; RV64ZVE32F-NEXT:  .LBB119_8: # %else20
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB119_9: # %cond.load
 ; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
-; RV64ZVE32F-NEXT:    addi a0, a0, 8
-; RV64ZVE32F-NEXT:    vlse16.v v9, (a0), zero
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT:    vslide1down.vx v10, v8, a3
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v9, a4
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a5
-; RV64ZVE32F-NEXT:    vmv.v.i v0, 15
-; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT:    vslidedown.vi v8, v10, 4, v0.t
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB119_2
+; RV64ZVE32F-NEXT:  .LBB119_10: # %cond.load1
+; RV64ZVE32F-NEXT:    lh a2, 4(a0)
+; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB119_3
+; RV64ZVE32F-NEXT:  .LBB119_11: # %cond.load4
+; RV64ZVE32F-NEXT:    lh a2, 6(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB119_4
+; RV64ZVE32F-NEXT:  .LBB119_12: # %cond.load7
+; RV64ZVE32F-NEXT:    lh a2, 2(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB119_5
+; RV64ZVE32F-NEXT:  .LBB119_13: # %cond.load10
+; RV64ZVE32F-NEXT:    lh a2, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB119_6
+; RV64ZVE32F-NEXT:  .LBB119_14: # %cond.load13
+; RV64ZVE32F-NEXT:    lh a2, 10(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB119_7
+; RV64ZVE32F-NEXT:  .LBB119_15: # %cond.load16
+; RV64ZVE32F-NEXT:    lh a2, 12(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB119_8
+; RV64ZVE32F-NEXT:  .LBB119_16: # %cond.load19
+; RV64ZVE32F-NEXT:    lh a0, 14(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 7
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 0, i64 2, i64 3, i64 1, i64 4, i64 5, i64 6, i64 7>
-  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> splat (i1 true), <8 x i16> poison)
+  %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %allones, <8 x i16> poison)
   ret <8 x i16> %v
 }
 
@@ -13819,7 +15082,7 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
 ; RV64ZVE32F-NEXT:    addi sp, sp, 144
 ; RV64ZVE32F-NEXT:    ret
   %ptrs = getelementptr inbounds i64, ptr %base, <32 x i64> <i64 0, i64 2, i64 4, i64 6, i64 8, i64 10, i64 12, i64 14, i64 16, i64 18, i64 20, i64 22, i64 24, i64 26, i64 28, i64 30, i64 32, i64 34, i64 36, i64 38, i64 40, i64 42, i64 44, i64 46, i64 48, i64 50, i64 52, i64 54, i64 56, i64 58, i64 60, i64 62>
-  %x = call <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr> %ptrs, i32 8, <32 x i1> splat (i1 true), <32 x i64> poison)
+  %x = call <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr> %ptrs, i32 8, <32 x i1> shufflevector(<32 x i1> insertelement(<32 x i1> poison, i1 true, i32 0), <32 x i1> poison, <32 x i32> zeroinitializer), <32 x i64> poison)
   ret <32 x i64> %x
 }
 
@@ -13862,7 +15125,7 @@ define <4 x i32> @masked_gather_widen_sew_negative_stride(ptr %base) {
 ; RV64ZVE32F-NEXT:    vslide1down.vx v8, v8, a0
 ; RV64ZVE32F-NEXT:    ret
   %ptrs = getelementptr i32, ptr %base, <4 x i64> <i64 34, i64 35, i64 0, i64 1>
-  %x = call <4 x i32> @llvm.masked.gather.v4i32.v32p0(<4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 true), <4 x i32> poison)
+  %x = call <4 x i32> @llvm.masked.gather.v4i32.v32p0(<4 x ptr> %ptrs, i32 8, <4 x i1> shufflevector(<4 x i1> insertelement(<4 x i1> poison, i1 true, i32 0), <4 x i1> poison, <4 x i32> zeroinitializer), <4 x i32> poison)
   ret <4 x i32> %x
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index aa815e18ac1014..b2ff4714556319 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -336,19 +336,49 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4i8:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    ld a3, 16(a0)
-; RV64ZVE32F-NEXT:    ld a0, 8(a0)
+; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a4, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB6_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    bnez a0, .LBB6_6
+; RV64ZVE32F-NEXT:  .LBB6_2: # %else2
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    bnez a0, .LBB6_7
+; RV64ZVE32F-NEXT:  .LBB6_3: # %else4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB6_8
+; RV64ZVE32F-NEXT:  .LBB6_4: # %else6
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB6_5: # %cond.store
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vse8.v v8, (a0)
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    beqz a0, .LBB6_2
+; RV64ZVE32F-NEXT:  .LBB6_6: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vse8.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse8.v v9, (a0)
+; RV64ZVE32F-NEXT:    vse8.v v9, (a4)
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    beqz a0, .LBB6_3
+; RV64ZVE32F-NEXT:  .LBB6_7: # %cond.store3
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse8.v v9, (a3)
+; RV64ZVE32F-NEXT:    vse8.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB6_4
+; RV64ZVE32F-NEXT:  .LBB6_8: # %cond.store5
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse8.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %val, <4 x ptr> %ptrs, i32 1, <4 x i1> splat (i1 1))
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %val, <4 x ptr> %ptrs, i32 1, <4 x i1> %mtrue)
   ret void
 }
 
@@ -853,19 +883,49 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4i16:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    ld a3, 16(a0)
-; RV64ZVE32F-NEXT:    ld a0, 8(a0)
+; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a4, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB15_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    bnez a0, .LBB15_6
+; RV64ZVE32F-NEXT:  .LBB15_2: # %else2
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    bnez a0, .LBB15_7
+; RV64ZVE32F-NEXT:  .LBB15_3: # %else4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB15_8
+; RV64ZVE32F-NEXT:  .LBB15_4: # %else6
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB15_5: # %cond.store
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    beqz a0, .LBB15_2
+; RV64ZVE32F-NEXT:  .LBB15_6: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse16.v v9, (a0)
+; RV64ZVE32F-NEXT:    vse16.v v9, (a4)
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    beqz a0, .LBB15_3
+; RV64ZVE32F-NEXT:  .LBB15_7: # %cond.store3
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse16.v v9, (a3)
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB15_4
+; RV64ZVE32F-NEXT:  .LBB15_8: # %cond.store5
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1))
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> %mtrue)
   ret void
 }
 
@@ -1728,19 +1788,49 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4i32:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    ld a3, 16(a0)
-; RV64ZVE32F-NEXT:    ld a0, 8(a0)
+; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a4, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB26_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    bnez a0, .LBB26_6
+; RV64ZVE32F-NEXT:  .LBB26_2: # %else2
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    bnez a0, .LBB26_7
+; RV64ZVE32F-NEXT:  .LBB26_3: # %else4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB26_8
+; RV64ZVE32F-NEXT:  .LBB26_4: # %else6
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB26_5: # %cond.store
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse32.v v8, (a0)
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    beqz a0, .LBB26_2
+; RV64ZVE32F-NEXT:  .LBB26_6: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vse32.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse32.v v9, (a0)
+; RV64ZVE32F-NEXT:    vse32.v v9, (a4)
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    beqz a0, .LBB26_3
+; RV64ZVE32F-NEXT:  .LBB26_7: # %cond.store3
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse32.v v9, (a3)
+; RV64ZVE32F-NEXT:    vse32.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB26_4
+; RV64ZVE32F-NEXT:  .LBB26_8: # %cond.store5
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1))
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> %mtrue)
   ret void
 }
 
@@ -3073,22 +3163,50 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs) {
 ; RV32ZVE32F-NEXT:    lw a2, 24(a0)
 ; RV32ZVE32F-NEXT:    lw a3, 20(a0)
 ; RV32ZVE32F-NEXT:    lw a4, 16(a0)
-; RV32ZVE32F-NEXT:    lw a5, 12(a0)
+; RV32ZVE32F-NEXT:    lw a7, 12(a0)
 ; RV32ZVE32F-NEXT:    lw a6, 8(a0)
-; RV32ZVE32F-NEXT:    lw a7, 0(a0)
-; RV32ZVE32F-NEXT:    lw a0, 4(a0)
+; RV32ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV32ZVE32F-NEXT:    vmset.m v9
+; RV32ZVE32F-NEXT:    vmv.x.s a5, v9
+; RV32ZVE32F-NEXT:    beqz zero, .LBB39_5
+; RV32ZVE32F-NEXT:  # %bb.1: # %else
+; RV32ZVE32F-NEXT:    andi a0, a5, 2
+; RV32ZVE32F-NEXT:    bnez a0, .LBB39_6
+; RV32ZVE32F-NEXT:  .LBB39_2: # %else2
+; RV32ZVE32F-NEXT:    andi a0, a5, 4
+; RV32ZVE32F-NEXT:    bnez a0, .LBB39_7
+; RV32ZVE32F-NEXT:  .LBB39_3: # %else4
+; RV32ZVE32F-NEXT:    andi a5, a5, 8
+; RV32ZVE32F-NEXT:    bnez a5, .LBB39_8
+; RV32ZVE32F-NEXT:  .LBB39_4: # %else6
+; RV32ZVE32F-NEXT:    ret
+; RV32ZVE32F-NEXT:  .LBB39_5: # %cond.store
+; RV32ZVE32F-NEXT:    lw t0, 4(a0)
+; RV32ZVE32F-NEXT:    lw a0, 0(a0)
+; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s t1, v8
+; RV32ZVE32F-NEXT:    sw t0, 4(t1)
+; RV32ZVE32F-NEXT:    sw a0, 0(t1)
+; RV32ZVE32F-NEXT:    andi a0, a5, 2
+; RV32ZVE32F-NEXT:    beqz a0, .LBB39_2
+; RV32ZVE32F-NEXT:  .LBB39_6: # %cond.store1
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT:    vmv.x.s t0, v8
-; RV32ZVE32F-NEXT:    sw a7, 0(t0)
-; RV32ZVE32F-NEXT:    sw a0, 4(t0)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
+; RV32ZVE32F-NEXT:    sw a7, 4(a0)
 ; RV32ZVE32F-NEXT:    sw a6, 0(a0)
-; RV32ZVE32F-NEXT:    sw a5, 4(a0)
+; RV32ZVE32F-NEXT:    andi a0, a5, 4
+; RV32ZVE32F-NEXT:    beqz a0, .LBB39_3
+; RV32ZVE32F-NEXT:  .LBB39_7: # %cond.store3
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
 ; RV32ZVE32F-NEXT:    sw a4, 0(a0)
 ; RV32ZVE32F-NEXT:    sw a3, 4(a0)
+; RV32ZVE32F-NEXT:    andi a5, a5, 8
+; RV32ZVE32F-NEXT:    beqz a5, .LBB39_4
+; RV32ZVE32F-NEXT:  .LBB39_8: # %cond.store5
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
 ; RV32ZVE32F-NEXT:    sw a2, 0(a0)
@@ -3098,19 +3216,46 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4i64:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a2, 24(a1)
-; RV64ZVE32F-NEXT:    ld a3, 16(a1)
-; RV64ZVE32F-NEXT:    ld a4, 8(a1)
+; RV64ZVE32F-NEXT:    ld a4, 16(a1)
+; RV64ZVE32F-NEXT:    ld a7, 8(a1)
+; RV64ZVE32F-NEXT:    ld a3, 24(a0)
+; RV64ZVE32F-NEXT:    ld a5, 16(a0)
+; RV64ZVE32F-NEXT:    ld t0, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a6, v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB39_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a0, a6, 2
+; RV64ZVE32F-NEXT:    bnez a0, .LBB39_6
+; RV64ZVE32F-NEXT:  .LBB39_2: # %else2
+; RV64ZVE32F-NEXT:    andi a0, a6, 4
+; RV64ZVE32F-NEXT:    bnez a0, .LBB39_7
+; RV64ZVE32F-NEXT:  .LBB39_3: # %else4
+; RV64ZVE32F-NEXT:    andi a0, a6, 8
+; RV64ZVE32F-NEXT:    bnez a0, .LBB39_8
+; RV64ZVE32F-NEXT:  .LBB39_4: # %else6
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB39_5: # %cond.store
 ; RV64ZVE32F-NEXT:    ld a1, 0(a1)
-; RV64ZVE32F-NEXT:    ld a5, 0(a0)
-; RV64ZVE32F-NEXT:    ld a6, 8(a0)
-; RV64ZVE32F-NEXT:    ld a7, 16(a0)
-; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    sd a5, 0(a1)
-; RV64ZVE32F-NEXT:    sd a6, 0(a4)
-; RV64ZVE32F-NEXT:    sd a7, 0(a3)
-; RV64ZVE32F-NEXT:    sd a0, 0(a2)
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    sd a0, 0(a1)
+; RV64ZVE32F-NEXT:    andi a0, a6, 2
+; RV64ZVE32F-NEXT:    beqz a0, .LBB39_2
+; RV64ZVE32F-NEXT:  .LBB39_6: # %cond.store1
+; RV64ZVE32F-NEXT:    sd t0, 0(a7)
+; RV64ZVE32F-NEXT:    andi a0, a6, 4
+; RV64ZVE32F-NEXT:    beqz a0, .LBB39_3
+; RV64ZVE32F-NEXT:  .LBB39_7: # %cond.store3
+; RV64ZVE32F-NEXT:    sd a5, 0(a4)
+; RV64ZVE32F-NEXT:    andi a0, a6, 8
+; RV64ZVE32F-NEXT:    beqz a0, .LBB39_4
+; RV64ZVE32F-NEXT:  .LBB39_8: # %cond.store5
+; RV64ZVE32F-NEXT:    sd a3, 0(a2)
 ; RV64ZVE32F-NEXT:    ret
-  call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> %val, <4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 1))
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  call void @llvm.masked.scatter.v4i64.v4p0(<4 x i64> %val, <4 x ptr> %ptrs, i32 8, <4 x i1> %mtrue)
   ret void
 }
 
@@ -6023,19 +6168,49 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4f16:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    ld a3, 16(a0)
-; RV64ZVE32F-NEXT:    ld a0, 8(a0)
+; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a4, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB55_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    bnez a0, .LBB55_6
+; RV64ZVE32F-NEXT:  .LBB55_2: # %else2
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    bnez a0, .LBB55_7
+; RV64ZVE32F-NEXT:  .LBB55_3: # %else4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB55_8
+; RV64ZVE32F-NEXT:  .LBB55_4: # %else6
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB55_5: # %cond.store
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    beqz a0, .LBB55_2
+; RV64ZVE32F-NEXT:  .LBB55_6: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse16.v v9, (a0)
+; RV64ZVE32F-NEXT:    vse16.v v9, (a4)
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    beqz a0, .LBB55_3
+; RV64ZVE32F-NEXT:  .LBB55_7: # %cond.store3
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse16.v v9, (a3)
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB55_4
+; RV64ZVE32F-NEXT:  .LBB55_8: # %cond.store5
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse16.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1))
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> %mtrue)
   ret void
 }
 
@@ -6845,19 +7020,49 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs) {
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4f32:
 ; RV64ZVE32F:       # %bb.0:
 ; RV64ZVE32F-NEXT:    ld a1, 24(a0)
-; RV64ZVE32F-NEXT:    ld a2, 0(a0)
-; RV64ZVE32F-NEXT:    ld a3, 16(a0)
-; RV64ZVE32F-NEXT:    ld a0, 8(a0)
+; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a4, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a3, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB65_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    bnez a0, .LBB65_6
+; RV64ZVE32F-NEXT:  .LBB65_2: # %else2
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    bnez a0, .LBB65_7
+; RV64ZVE32F-NEXT:  .LBB65_3: # %else4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB65_8
+; RV64ZVE32F-NEXT:  .LBB65_4: # %else6
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB65_5: # %cond.store
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse32.v v8, (a0)
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    beqz a0, .LBB65_2
+; RV64ZVE32F-NEXT:  .LBB65_6: # %cond.store1
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT:    vse32.v v8, (a2)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse32.v v9, (a0)
+; RV64ZVE32F-NEXT:    vse32.v v9, (a4)
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    beqz a0, .LBB65_3
+; RV64ZVE32F-NEXT:  .LBB65_7: # %cond.store3
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse32.v v9, (a3)
+; RV64ZVE32F-NEXT:    vse32.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB65_4
+; RV64ZVE32F-NEXT:  .LBB65_8: # %cond.store5
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64ZVE32F-NEXT:    vse32.v v8, (a1)
 ; RV64ZVE32F-NEXT:    ret
-  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1))
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> %mtrue)
   ret void
 }
 
@@ -8163,15 +8368,43 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs) {
 ;
 ; RV32ZVE32F-LABEL: mscatter_truemask_v4f64:
 ; RV32ZVE32F:       # %bb.0:
+; RV32ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV32ZVE32F-NEXT:    vmset.m v9
+; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
+; RV32ZVE32F-NEXT:    beqz zero, .LBB78_5
+; RV32ZVE32F-NEXT:  # %bb.1: # %else
+; RV32ZVE32F-NEXT:    andi a1, a0, 2
+; RV32ZVE32F-NEXT:    bnez a1, .LBB78_6
+; RV32ZVE32F-NEXT:  .LBB78_2: # %else2
+; RV32ZVE32F-NEXT:    andi a1, a0, 4
+; RV32ZVE32F-NEXT:    bnez a1, .LBB78_7
+; RV32ZVE32F-NEXT:  .LBB78_3: # %else4
+; RV32ZVE32F-NEXT:    andi a0, a0, 8
+; RV32ZVE32F-NEXT:    bnez a0, .LBB78_8
+; RV32ZVE32F-NEXT:  .LBB78_4: # %else6
+; RV32ZVE32F-NEXT:    ret
+; RV32ZVE32F-NEXT:  .LBB78_5: # %cond.store
+; RV32ZVE32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV32ZVE32F-NEXT:    fsd fa0, 0(a1)
+; RV32ZVE32F-NEXT:    andi a1, a0, 2
+; RV32ZVE32F-NEXT:    beqz a1, .LBB78_2
+; RV32ZVE32F-NEXT:  .LBB78_6: # %cond.store1
 ; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
-; RV32ZVE32F-NEXT:    fsd fa0, 0(a0)
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
-; RV32ZVE32F-NEXT:    fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV32ZVE32F-NEXT:    fsd fa1, 0(a1)
+; RV32ZVE32F-NEXT:    andi a1, a0, 4
+; RV32ZVE32F-NEXT:    beqz a1, .LBB78_3
+; RV32ZVE32F-NEXT:  .LBB78_7: # %cond.store3
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV32ZVE32F-NEXT:    vmv.x.s a0, v9
-; RV32ZVE32F-NEXT:    fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV32ZVE32F-NEXT:    fsd fa2, 0(a1)
+; RV32ZVE32F-NEXT:    andi a0, a0, 8
+; RV32ZVE32F-NEXT:    beqz a0, .LBB78_4
+; RV32ZVE32F-NEXT:  .LBB78_8: # %cond.store5
+; RV32ZVE32F-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32ZVE32F-NEXT:    vmv.x.s a0, v8
 ; RV32ZVE32F-NEXT:    fsd fa3, 0(a0)
@@ -8179,16 +8412,43 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x ptr> %ptrs) {
 ;
 ; RV64ZVE32F-LABEL: mscatter_truemask_v4f64:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    ld a1, 0(a0)
-; RV64ZVE32F-NEXT:    ld a2, 8(a0)
-; RV64ZVE32F-NEXT:    ld a3, 16(a0)
-; RV64ZVE32F-NEXT:    ld a0, 24(a0)
-; RV64ZVE32F-NEXT:    fsd fa0, 0(a1)
-; RV64ZVE32F-NEXT:    fsd fa1, 0(a2)
-; RV64ZVE32F-NEXT:    fsd fa2, 0(a3)
-; RV64ZVE32F-NEXT:    fsd fa3, 0(a0)
+; RV64ZVE32F-NEXT:    ld a1, 24(a0)
+; RV64ZVE32F-NEXT:    ld a2, 16(a0)
+; RV64ZVE32F-NEXT:    ld a4, 8(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a3, v8
+; RV64ZVE32F-NEXT:    beqz zero, .LBB78_5
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    bnez a0, .LBB78_6
+; RV64ZVE32F-NEXT:  .LBB78_2: # %else2
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    bnez a0, .LBB78_7
+; RV64ZVE32F-NEXT:  .LBB78_3: # %else4
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    bnez a3, .LBB78_8
+; RV64ZVE32F-NEXT:  .LBB78_4: # %else6
 ; RV64ZVE32F-NEXT:    ret
-  call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %val, <4 x ptr> %ptrs, i32 8, <4 x i1> splat (i1 1))
+; RV64ZVE32F-NEXT:  .LBB78_5: # %cond.store
+; RV64ZVE32F-NEXT:    ld a0, 0(a0)
+; RV64ZVE32F-NEXT:    fsd fa0, 0(a0)
+; RV64ZVE32F-NEXT:    andi a0, a3, 2
+; RV64ZVE32F-NEXT:    beqz a0, .LBB78_2
+; RV64ZVE32F-NEXT:  .LBB78_6: # %cond.store1
+; RV64ZVE32F-NEXT:    fsd fa1, 0(a4)
+; RV64ZVE32F-NEXT:    andi a0, a3, 4
+; RV64ZVE32F-NEXT:    beqz a0, .LBB78_3
+; RV64ZVE32F-NEXT:  .LBB78_7: # %cond.store3
+; RV64ZVE32F-NEXT:    fsd fa2, 0(a2)
+; RV64ZVE32F-NEXT:    andi a3, a3, 8
+; RV64ZVE32F-NEXT:    beqz a3, .LBB78_4
+; RV64ZVE32F-NEXT:  .LBB78_8: # %cond.store5
+; RV64ZVE32F-NEXT:    fsd fa3, 0(a1)
+; RV64ZVE32F-NEXT:    ret
+  %mhead = insertelement <4 x i1> poison, i1 1, i32 0
+  %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> poison, <4 x i32> zeroinitializer
+  call void @llvm.masked.scatter.v4f64.v4p0(<4 x double> %val, <4 x ptr> %ptrs, i32 8, <4 x i1> %mtrue)
   ret void
 }
 
@@ -11084,8 +11344,10 @@ define void @mscatter_unit_stride(<8 x i16> %val, ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
-  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true))
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
   ret void
 }
 
@@ -11096,8 +11358,10 @@ define void @mscatter_unit_stride_with_offset(<8 x i16> %val, ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12>
-  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true))
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
   ret void
 }
 
@@ -11109,8 +11373,10 @@ define void @mscatter_shuffle_reverse(<8 x i16> %val, ptr %base) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
 ; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>
-  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true))
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
   ret void
 }
 
@@ -11133,31 +11399,89 @@ define void @mscatter_shuffle_rotate(<8 x i16> %val, ptr %base) {
 ;
 ; RV64ZVE32F-LABEL: mscatter_shuffle_rotate:
 ; RV64ZVE32F:       # %bb.0:
-; RV64ZVE32F-NEXT:    addi a1, a0, 6
-; RV64ZVE32F-NEXT:    addi a2, a0, 4
-; RV64ZVE32F-NEXT:    addi a3, a0, 2
-; RV64ZVE32F-NEXT:    addi a4, a0, 14
-; RV64ZVE32F-NEXT:    addi a5, a0, 12
-; RV64ZVE32F-NEXT:    addi a6, a0, 10
-; RV64ZVE32F-NEXT:    addi a7, a0, 8
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v9
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB96_9
+; RV64ZVE32F-NEXT:  # %bb.1: # %else
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_10
+; RV64ZVE32F-NEXT:  .LBB96_2: # %else2
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_11
+; RV64ZVE32F-NEXT:  .LBB96_3: # %else4
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_12
+; RV64ZVE32F-NEXT:  .LBB96_4: # %else6
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_13
+; RV64ZVE32F-NEXT:  .LBB96_5: # %else8
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_14
+; RV64ZVE32F-NEXT:  .LBB96_6: # %else10
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    bnez a2, .LBB96_15
+; RV64ZVE32F-NEXT:  .LBB96_7: # %else12
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    bnez a1, .LBB96_16
+; RV64ZVE32F-NEXT:  .LBB96_8: # %else14
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB96_9: # %cond.store
+; RV64ZVE32F-NEXT:    addi a2, a0, 8
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32F-NEXT:    vse16.v v8, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 2
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_2
+; RV64ZVE32F-NEXT:  .LBB96_10: # %cond.store1
+; RV64ZVE32F-NEXT:    addi a2, a0, 10
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64ZVE32F-NEXT:    vse16.v v8, (a7)
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT:    vse16.v v9, (a6)
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_3
+; RV64ZVE32F-NEXT:  .LBB96_11: # %cond.store3
+; RV64ZVE32F-NEXT:    addi a2, a0, 12
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 2
-; RV64ZVE32F-NEXT:    vse16.v v9, (a5)
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 8
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_4
+; RV64ZVE32F-NEXT:  .LBB96_12: # %cond.store5
+; RV64ZVE32F-NEXT:    addi a2, a0, 14
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 3
-; RV64ZVE32F-NEXT:    vse16.v v9, (a4)
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_5
+; RV64ZVE32F-NEXT:  .LBB96_13: # %cond.store7
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
 ; RV64ZVE32F-NEXT:    vse16.v v9, (a0)
+; RV64ZVE32F-NEXT:    andi a2, a1, 32
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_6
+; RV64ZVE32F-NEXT:  .LBB96_14: # %cond.store9
+; RV64ZVE32F-NEXT:    addi a2, a0, 2
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 5
-; RV64ZVE32F-NEXT:    vse16.v v9, (a3)
+; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a2, a1, 64
+; RV64ZVE32F-NEXT:    beqz a2, .LBB96_7
+; RV64ZVE32F-NEXT:  .LBB96_15: # %cond.store11
+; RV64ZVE32F-NEXT:    addi a2, a0, 4
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 6
 ; RV64ZVE32F-NEXT:    vse16.v v9, (a2)
+; RV64ZVE32F-NEXT:    andi a1, a1, -128
+; RV64ZVE32F-NEXT:    beqz a1, .LBB96_8
+; RV64ZVE32F-NEXT:  .LBB96_16: # %cond.store13
+; RV64ZVE32F-NEXT:    addi a0, a0, 6
+; RV64ZVE32F-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 7
-; RV64ZVE32F-NEXT:    vse16.v v8, (a1)
+; RV64ZVE32F-NEXT:    vse16.v v8, (a0)
 ; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i16 0
+  %allones = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64>  <i64 4, i64 5, i64 6, i64 7, i64 0, i64 1, i64 2, i64 3>
-  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> splat (i1 true))
+  call void @llvm.masked.scatter.v8i16.v8p0(<8 x i16> %val, <8 x ptr> %ptrs, i32 2, <8 x i1> %allones)
   ret void
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 2c62cbd583d00c..e15253b67275cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -34,7 +34,9 @@ define <2 x i8> @vadd_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -58,7 +60,9 @@ define <2 x i8> @vadd_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -68,7 +72,9 @@ define <2 x i8> @vadd_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -78,7 +84,11 @@ define <2 x i8> @vadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -100,7 +110,9 @@ define <4 x i8> @vadd_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -136,7 +148,9 @@ define <4 x i8> @vadd_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -146,7 +160,9 @@ define <4 x i8> @vadd_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -156,7 +172,11 @@ define <4 x i8> @vadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -178,7 +198,9 @@ define <5 x i8> @vadd_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -202,7 +224,9 @@ define <5 x i8> @vadd_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -212,7 +236,9 @@ define <5 x i8> @vadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -222,7 +248,11 @@ define <5 x i8> @vadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -244,7 +274,9 @@ define <8 x i8> @vadd_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -268,7 +300,9 @@ define <8 x i8> @vadd_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -278,7 +312,9 @@ define <8 x i8> @vadd_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -288,7 +324,11 @@ define <8 x i8> @vadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -310,7 +350,9 @@ define <16 x i8> @vadd_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -334,7 +376,9 @@ define <16 x i8> @vadd_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -344,7 +388,9 @@ define <16 x i8> @vadd_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -354,7 +400,11 @@ define <16 x i8> @vadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -381,7 +431,9 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -403,7 +455,11 @@ define <256 x i8> @vadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v16, v16, -1
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -420,7 +476,9 @@ define <256 x i8> @vadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -433,7 +491,9 @@ define <256 x i8> @vadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -455,7 +515,9 @@ define <2 x i16> @vadd_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -479,7 +541,9 @@ define <2 x i16> @vadd_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -489,7 +553,9 @@ define <2 x i16> @vadd_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -499,7 +565,11 @@ define <2 x i16> @vadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -521,7 +591,9 @@ define <4 x i16> @vadd_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -545,7 +617,9 @@ define <4 x i16> @vadd_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -555,7 +629,9 @@ define <4 x i16> @vadd_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -565,7 +641,11 @@ define <4 x i16> @vadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -587,7 +667,9 @@ define <8 x i16> @vadd_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -611,7 +693,9 @@ define <8 x i16> @vadd_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -621,7 +705,9 @@ define <8 x i16> @vadd_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -631,7 +717,11 @@ define <8 x i16> @vadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -653,7 +743,9 @@ define <16 x i16> @vadd_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -677,7 +769,9 @@ define <16 x i16> @vadd_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -687,7 +781,9 @@ define <16 x i16> @vadd_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -697,7 +793,11 @@ define <16 x i16> @vadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -719,7 +819,9 @@ define <2 x i32> @vadd_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -743,7 +845,9 @@ define <2 x i32> @vadd_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -753,7 +857,9 @@ define <2 x i32> @vadd_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -763,7 +869,11 @@ define <2 x i32> @vadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -785,7 +895,9 @@ define <4 x i32> @vadd_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -809,7 +921,9 @@ define <4 x i32> @vadd_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -819,7 +933,9 @@ define <4 x i32> @vadd_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -829,7 +945,11 @@ define <4 x i32> @vadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -851,7 +971,9 @@ define <8 x i32> @vadd_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -875,7 +997,9 @@ define <8 x i32> @vadd_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -885,7 +1009,9 @@ define <8 x i32> @vadd_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -895,7 +1021,11 @@ define <8 x i32> @vadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -917,7 +1047,9 @@ define <16 x i32> @vadd_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -941,7 +1073,9 @@ define <16 x i32> @vadd_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -951,7 +1085,9 @@ define <16 x i32> @vadd_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -961,7 +1097,11 @@ define <16 x i32> @vadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -983,7 +1123,9 @@ define <2 x i64> @vadd_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1035,7 +1177,9 @@ define <2 x i64> @vadd_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1045,7 +1189,9 @@ define <2 x i64> @vadd_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1055,7 +1201,11 @@ define <2 x i64> @vadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1077,7 +1227,9 @@ define <4 x i64> @vadd_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1129,7 +1281,9 @@ define <4 x i64> @vadd_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1139,7 +1293,9 @@ define <4 x i64> @vadd_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1149,7 +1305,11 @@ define <4 x i64> @vadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1171,7 +1331,9 @@ define <8 x i64> @vadd_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1223,7 +1385,9 @@ define <8 x i64> @vadd_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1233,7 +1397,9 @@ define <8 x i64> @vadd_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1243,7 +1409,11 @@ define <8 x i64> @vadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1265,7 +1435,9 @@ define <16 x i64> @vadd_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1317,7 +1489,9 @@ define <16 x i64> @vadd_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1327,7 +1501,9 @@ define <16 x i64> @vadd_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1337,7 +1513,11 @@ define <16 x i64> @vadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1349,16 +1529,16 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-LABEL: vadd_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1390,22 +1570,24 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vadd_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vadd.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1433,7 +1615,11 @@ define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vadd.vi v16, v16, -1
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1454,7 +1640,9 @@ define <32 x i64> @vadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vsetivli zero, 12, e64, m8, ta, ma
 ; RV64-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1483,6 +1671,8 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index 3db44e87109bd4..6af5ba185b8b8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -39,7 +39,9 @@ define <2 x i8> @vmax_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -63,7 +65,9 @@ define <2 x i8> @vmax_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -85,7 +89,9 @@ define <4 x i8> @vmax_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -121,7 +127,9 @@ define <4 x i8> @vmax_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -143,7 +151,9 @@ define <5 x i8> @vmax_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -167,7 +177,9 @@ define <5 x i8> @vmax_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -189,7 +201,9 @@ define <8 x i8> @vmax_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -213,7 +227,9 @@ define <8 x i8> @vmax_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -235,7 +251,9 @@ define <16 x i8> @vmax_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -259,7 +277,9 @@ define <16 x i8> @vmax_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -312,7 +332,9 @@ define <256 x i8> @vmax_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.smax.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.smax.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -368,7 +390,9 @@ define <2 x i16> @vmax_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -392,7 +416,9 @@ define <2 x i16> @vmax_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -414,7 +440,9 @@ define <4 x i16> @vmax_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -438,7 +466,9 @@ define <4 x i16> @vmax_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -460,7 +490,9 @@ define <8 x i16> @vmax_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -484,7 +516,9 @@ define <8 x i16> @vmax_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -506,7 +540,9 @@ define <16 x i16> @vmax_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -530,7 +566,9 @@ define <16 x i16> @vmax_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -552,7 +590,9 @@ define <2 x i32> @vmax_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -576,7 +616,9 @@ define <2 x i32> @vmax_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -598,7 +640,9 @@ define <4 x i32> @vmax_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -622,7 +666,9 @@ define <4 x i32> @vmax_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -644,7 +690,9 @@ define <8 x i32> @vmax_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -668,7 +716,9 @@ define <8 x i32> @vmax_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -690,7 +740,9 @@ define <16 x i32> @vmax_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -714,7 +766,9 @@ define <16 x i32> @vmax_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -736,7 +790,9 @@ define <2 x i64> @vmax_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -788,7 +844,9 @@ define <2 x i64> @vmax_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -810,7 +868,9 @@ define <4 x i64> @vmax_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -862,7 +922,9 @@ define <4 x i64> @vmax_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -884,7 +946,9 @@ define <8 x i64> @vmax_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -936,7 +1000,9 @@ define <8 x i64> @vmax_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -958,7 +1024,9 @@ define <16 x i64> @vmax_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1010,7 +1078,9 @@ define <16 x i64> @vmax_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1022,16 +1092,16 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-LABEL: vmax_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB74_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB74_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vmax.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1064,6 +1134,8 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vmax.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.smax.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.smax.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index c97c2232715f56..12c6410068c693 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -38,7 +38,9 @@ define <2 x i8> @vmaxu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -62,7 +64,9 @@ define <2 x i8> @vmaxu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -84,7 +88,9 @@ define <4 x i8> @vmaxu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -120,7 +126,9 @@ define <4 x i8> @vmaxu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -142,7 +150,9 @@ define <5 x i8> @vmaxu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -166,7 +176,9 @@ define <5 x i8> @vmaxu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -188,7 +200,9 @@ define <8 x i8> @vmaxu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -212,7 +226,9 @@ define <8 x i8> @vmaxu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -234,7 +250,9 @@ define <16 x i8> @vmaxu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -258,7 +276,9 @@ define <16 x i8> @vmaxu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -311,7 +331,9 @@ define <256 x i8> @vmaxu_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.umax.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.umax.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -367,7 +389,9 @@ define <2 x i16> @vmaxu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -391,7 +415,9 @@ define <2 x i16> @vmaxu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -413,7 +439,9 @@ define <4 x i16> @vmaxu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -437,7 +465,9 @@ define <4 x i16> @vmaxu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -459,7 +489,9 @@ define <8 x i16> @vmaxu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -483,7 +515,9 @@ define <8 x i16> @vmaxu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -505,7 +539,9 @@ define <16 x i16> @vmaxu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -529,7 +565,9 @@ define <16 x i16> @vmaxu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -551,7 +589,9 @@ define <2 x i32> @vmaxu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -575,7 +615,9 @@ define <2 x i32> @vmaxu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -597,7 +639,9 @@ define <4 x i32> @vmaxu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -621,7 +665,9 @@ define <4 x i32> @vmaxu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -643,7 +689,9 @@ define <8 x i32> @vmaxu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -667,7 +715,9 @@ define <8 x i32> @vmaxu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -689,7 +739,9 @@ define <16 x i32> @vmaxu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -713,7 +765,9 @@ define <16 x i32> @vmaxu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -735,7 +789,9 @@ define <2 x i64> @vmaxu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -787,7 +843,9 @@ define <2 x i64> @vmaxu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -809,7 +867,9 @@ define <4 x i64> @vmaxu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -861,7 +921,9 @@ define <4 x i64> @vmaxu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -883,7 +945,9 @@ define <8 x i64> @vmaxu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -935,7 +999,9 @@ define <8 x i64> @vmaxu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -957,7 +1023,9 @@ define <16 x i64> @vmaxu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1009,7 +1077,9 @@ define <16 x i64> @vmaxu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1021,16 +1091,16 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-LABEL: vmaxu_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB74_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB74_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vmaxu.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1063,6 +1133,8 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vmaxu.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.umax.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.umax.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index eaa19110a2a28c..f5b9421d28c349 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -39,7 +39,9 @@ define <2 x i8> @vmin_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -63,7 +65,9 @@ define <2 x i8> @vmin_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -85,7 +89,9 @@ define <4 x i8> @vmin_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -121,7 +127,9 @@ define <4 x i8> @vmin_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -143,7 +151,9 @@ define <5 x i8> @vmin_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -167,7 +177,9 @@ define <5 x i8> @vmin_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -189,7 +201,9 @@ define <8 x i8> @vmin_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -213,7 +227,9 @@ define <8 x i8> @vmin_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -235,7 +251,9 @@ define <16 x i8> @vmin_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -259,7 +277,9 @@ define <16 x i8> @vmin_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -312,7 +332,9 @@ define <256 x i8> @vmin_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.smin.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.smin.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -368,7 +390,9 @@ define <2 x i16> @vmin_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -392,7 +416,9 @@ define <2 x i16> @vmin_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -414,7 +440,9 @@ define <4 x i16> @vmin_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -438,7 +466,9 @@ define <4 x i16> @vmin_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -460,7 +490,9 @@ define <8 x i16> @vmin_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -484,7 +516,9 @@ define <8 x i16> @vmin_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -506,7 +540,9 @@ define <16 x i16> @vmin_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -530,7 +566,9 @@ define <16 x i16> @vmin_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -552,7 +590,9 @@ define <2 x i32> @vmin_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -576,7 +616,9 @@ define <2 x i32> @vmin_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -598,7 +640,9 @@ define <4 x i32> @vmin_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -622,7 +666,9 @@ define <4 x i32> @vmin_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -644,7 +690,9 @@ define <8 x i32> @vmin_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -668,7 +716,9 @@ define <8 x i32> @vmin_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -690,7 +740,9 @@ define <16 x i32> @vmin_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -714,7 +766,9 @@ define <16 x i32> @vmin_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -736,7 +790,9 @@ define <2 x i64> @vmin_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -788,7 +844,9 @@ define <2 x i64> @vmin_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -810,7 +868,9 @@ define <4 x i64> @vmin_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -862,7 +922,9 @@ define <4 x i64> @vmin_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -884,7 +946,9 @@ define <8 x i64> @vmin_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -936,7 +1000,9 @@ define <8 x i64> @vmin_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -958,7 +1024,9 @@ define <16 x i64> @vmin_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1010,7 +1078,9 @@ define <16 x i64> @vmin_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1022,16 +1092,16 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-LABEL: vmin_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB74_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB74_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vmin.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1064,6 +1134,8 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vmin.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.smin.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.smin.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index 48175e5b905ba5..d07580efceb50b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -38,7 +38,9 @@ define <2 x i8> @vminu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -62,7 +64,9 @@ define <2 x i8> @vminu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -84,7 +88,9 @@ define <4 x i8> @vminu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -120,7 +126,9 @@ define <4 x i8> @vminu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -142,7 +150,9 @@ define <5 x i8> @vminu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -166,7 +176,9 @@ define <5 x i8> @vminu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -188,7 +200,9 @@ define <8 x i8> @vminu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -212,7 +226,9 @@ define <8 x i8> @vminu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -234,7 +250,9 @@ define <16 x i8> @vminu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -258,7 +276,9 @@ define <16 x i8> @vminu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -311,7 +331,9 @@ define <256 x i8> @vminu_vx_v258i8_unmasked(<256 x i8> %va, i8 %b, i32 zeroext %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
-  %v = call <256 x i8> @llvm.vp.umin.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.umin.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -367,7 +389,9 @@ define <2 x i16> @vminu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -391,7 +415,9 @@ define <2 x i16> @vminu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -413,7 +439,9 @@ define <4 x i16> @vminu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -437,7 +465,9 @@ define <4 x i16> @vminu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -459,7 +489,9 @@ define <8 x i16> @vminu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -483,7 +515,9 @@ define <8 x i16> @vminu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -505,7 +539,9 @@ define <16 x i16> @vminu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -529,7 +565,9 @@ define <16 x i16> @vminu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -551,7 +589,9 @@ define <2 x i32> @vminu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -575,7 +615,9 @@ define <2 x i32> @vminu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -597,7 +639,9 @@ define <4 x i32> @vminu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -621,7 +665,9 @@ define <4 x i32> @vminu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -643,7 +689,9 @@ define <8 x i32> @vminu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -667,7 +715,9 @@ define <8 x i32> @vminu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -689,7 +739,9 @@ define <16 x i32> @vminu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -713,7 +765,9 @@ define <16 x i32> @vminu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -735,7 +789,9 @@ define <2 x i64> @vminu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -787,7 +843,9 @@ define <2 x i64> @vminu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -809,7 +867,9 @@ define <4 x i64> @vminu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -861,7 +921,9 @@ define <4 x i64> @vminu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -883,7 +945,9 @@ define <8 x i64> @vminu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -935,7 +999,9 @@ define <8 x i64> @vminu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -957,7 +1023,9 @@ define <16 x i64> @vminu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1009,7 +1077,9 @@ define <16 x i64> @vminu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1021,16 +1091,16 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-LABEL: vminu_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB74_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB74_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vminu.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1063,6 +1133,8 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vminu.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.umin.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.umin.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 291629de6dcfab..d7ed20f4e09868 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -43,7 +43,9 @@ define <2 x i8> @vsadd_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -67,7 +69,9 @@ define <2 x i8> @vsadd_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -77,7 +81,9 @@ define <2 x i8> @vsadd_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -87,7 +93,11 @@ define <2 x i8> @vsadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -109,7 +119,9 @@ define <4 x i8> @vsadd_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -145,7 +157,9 @@ define <4 x i8> @vsadd_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -155,7 +169,9 @@ define <4 x i8> @vsadd_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -165,7 +181,11 @@ define <4 x i8> @vsadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -187,7 +207,9 @@ define <5 x i8> @vsadd_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -211,7 +233,9 @@ define <5 x i8> @vsadd_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -221,7 +245,9 @@ define <5 x i8> @vsadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -231,7 +257,11 @@ define <5 x i8> @vsadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -253,7 +283,9 @@ define <8 x i8> @vsadd_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -277,7 +309,9 @@ define <8 x i8> @vsadd_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -287,7 +321,9 @@ define <8 x i8> @vsadd_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -297,7 +333,11 @@ define <8 x i8> @vsadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -319,7 +359,9 @@ define <16 x i8> @vsadd_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -343,7 +385,9 @@ define <16 x i8> @vsadd_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -353,7 +397,9 @@ define <16 x i8> @vsadd_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -363,7 +409,11 @@ define <16 x i8> @vsadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -390,7 +440,9 @@ define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -412,7 +464,11 @@ define <256 x i8> @vsadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v16, v16, -1
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -429,7 +485,9 @@ define <256 x i8> @vsadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -446,7 +504,9 @@ define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -468,7 +528,9 @@ define <2 x i16> @vsadd_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -492,7 +554,9 @@ define <2 x i16> @vsadd_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -502,7 +566,9 @@ define <2 x i16> @vsadd_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -512,7 +578,11 @@ define <2 x i16> @vsadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -534,7 +604,9 @@ define <4 x i16> @vsadd_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -558,7 +630,9 @@ define <4 x i16> @vsadd_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -568,7 +642,9 @@ define <4 x i16> @vsadd_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -578,7 +654,11 @@ define <4 x i16> @vsadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -600,7 +680,9 @@ define <8 x i16> @vsadd_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -624,7 +706,9 @@ define <8 x i16> @vsadd_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -634,7 +718,9 @@ define <8 x i16> @vsadd_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -644,7 +730,11 @@ define <8 x i16> @vsadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -666,7 +756,9 @@ define <16 x i16> @vsadd_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -690,7 +782,9 @@ define <16 x i16> @vsadd_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -700,7 +794,9 @@ define <16 x i16> @vsadd_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -710,7 +806,11 @@ define <16 x i16> @vsadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -732,7 +832,9 @@ define <2 x i32> @vsadd_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -756,7 +858,9 @@ define <2 x i32> @vsadd_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -766,7 +870,9 @@ define <2 x i32> @vsadd_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -776,7 +882,11 @@ define <2 x i32> @vsadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -798,7 +908,9 @@ define <4 x i32> @vsadd_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -822,7 +934,9 @@ define <4 x i32> @vsadd_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -832,7 +946,9 @@ define <4 x i32> @vsadd_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -842,7 +958,11 @@ define <4 x i32> @vsadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -864,7 +984,9 @@ define <8 x i32> @vsadd_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -888,7 +1010,9 @@ define <8 x i32> @vsadd_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -898,7 +1022,9 @@ define <8 x i32> @vsadd_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -908,7 +1034,11 @@ define <8 x i32> @vsadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -930,7 +1060,9 @@ define <16 x i32> @vsadd_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -954,7 +1086,9 @@ define <16 x i32> @vsadd_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -964,7 +1098,9 @@ define <16 x i32> @vsadd_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -974,7 +1110,11 @@ define <16 x i32> @vsadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -996,7 +1136,9 @@ define <2 x i64> @vsadd_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1048,7 +1190,9 @@ define <2 x i64> @vsadd_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1058,7 +1202,9 @@ define <2 x i64> @vsadd_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1068,7 +1214,11 @@ define <2 x i64> @vsadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1090,7 +1240,9 @@ define <4 x i64> @vsadd_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1142,7 +1294,9 @@ define <4 x i64> @vsadd_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1152,7 +1306,9 @@ define <4 x i64> @vsadd_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1162,7 +1318,11 @@ define <4 x i64> @vsadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1184,7 +1344,9 @@ define <8 x i64> @vsadd_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1236,7 +1398,9 @@ define <8 x i64> @vsadd_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1246,7 +1410,9 @@ define <8 x i64> @vsadd_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1256,7 +1422,11 @@ define <8 x i64> @vsadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1278,7 +1448,9 @@ define <16 x i64> @vsadd_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1330,7 +1502,9 @@ define <16 x i64> @vsadd_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1340,7 +1514,9 @@ define <16 x i64> @vsadd_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1350,7 +1526,11 @@ define <16 x i64> @vsadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1362,16 +1542,16 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-LABEL: vsadd_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1403,22 +1583,24 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vsadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vsadd_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vsadd.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1446,7 +1628,11 @@ define <32 x i64> @vsadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vsadd.vi v16, v16, -1
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1477,7 +1663,9 @@ define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1506,6 +1694,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index d38ee1148e894e..ea248010ef09a1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -39,7 +39,9 @@ define <2 x i8> @vsaddu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -63,7 +65,9 @@ define <2 x i8> @vsaddu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -73,7 +77,9 @@ define <2 x i8> @vsaddu_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -83,7 +89,11 @@ define <2 x i8> @vsaddu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -105,7 +115,9 @@ define <4 x i8> @vsaddu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -141,7 +153,9 @@ define <4 x i8> @vsaddu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -151,7 +165,9 @@ define <4 x i8> @vsaddu_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -161,7 +177,11 @@ define <4 x i8> @vsaddu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -183,7 +203,9 @@ define <5 x i8> @vsaddu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -207,7 +229,9 @@ define <5 x i8> @vsaddu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -217,7 +241,9 @@ define <5 x i8> @vsaddu_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -227,7 +253,11 @@ define <5 x i8> @vsaddu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -249,7 +279,9 @@ define <8 x i8> @vsaddu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -273,7 +305,9 @@ define <8 x i8> @vsaddu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -283,7 +317,9 @@ define <8 x i8> @vsaddu_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -293,7 +329,11 @@ define <8 x i8> @vsaddu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -315,7 +355,9 @@ define <16 x i8> @vsaddu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -339,7 +381,9 @@ define <16 x i8> @vsaddu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -349,7 +393,9 @@ define <16 x i8> @vsaddu_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -359,7 +405,11 @@ define <16 x i8> @vsaddu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -386,7 +436,9 @@ define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -408,7 +460,11 @@ define <256 x i8> @vsaddu_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v16, v16, -1
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -425,7 +481,9 @@ define <256 x i8> @vsaddu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -442,7 +500,9 @@ define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -464,7 +524,9 @@ define <2 x i16> @vsaddu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -488,7 +550,9 @@ define <2 x i16> @vsaddu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -498,7 +562,9 @@ define <2 x i16> @vsaddu_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -508,7 +574,11 @@ define <2 x i16> @vsaddu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -530,7 +600,9 @@ define <4 x i16> @vsaddu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -554,7 +626,9 @@ define <4 x i16> @vsaddu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -564,7 +638,9 @@ define <4 x i16> @vsaddu_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -574,7 +650,11 @@ define <4 x i16> @vsaddu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -596,7 +676,9 @@ define <8 x i16> @vsaddu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -620,7 +702,9 @@ define <8 x i16> @vsaddu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -630,7 +714,9 @@ define <8 x i16> @vsaddu_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -640,7 +726,11 @@ define <8 x i16> @vsaddu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -662,7 +752,9 @@ define <16 x i16> @vsaddu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -686,7 +778,9 @@ define <16 x i16> @vsaddu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -696,7 +790,9 @@ define <16 x i16> @vsaddu_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -706,7 +802,11 @@ define <16 x i16> @vsaddu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -728,7 +828,9 @@ define <2 x i32> @vsaddu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -752,7 +854,9 @@ define <2 x i32> @vsaddu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -762,7 +866,9 @@ define <2 x i32> @vsaddu_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -772,7 +878,11 @@ define <2 x i32> @vsaddu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -794,7 +904,9 @@ define <4 x i32> @vsaddu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -818,7 +930,9 @@ define <4 x i32> @vsaddu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -828,7 +942,9 @@ define <4 x i32> @vsaddu_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -838,7 +954,11 @@ define <4 x i32> @vsaddu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -860,7 +980,9 @@ define <8 x i32> @vsaddu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -884,7 +1006,9 @@ define <8 x i32> @vsaddu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -894,7 +1018,9 @@ define <8 x i32> @vsaddu_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -904,7 +1030,11 @@ define <8 x i32> @vsaddu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -926,7 +1056,9 @@ define <16 x i32> @vsaddu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -950,7 +1082,9 @@ define <16 x i32> @vsaddu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -960,7 +1094,9 @@ define <16 x i32> @vsaddu_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -970,7 +1106,11 @@ define <16 x i32> @vsaddu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -992,7 +1132,9 @@ define <2 x i64> @vsaddu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1044,7 +1186,9 @@ define <2 x i64> @vsaddu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1054,7 +1198,9 @@ define <2 x i64> @vsaddu_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1064,7 +1210,11 @@ define <2 x i64> @vsaddu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1086,7 +1236,9 @@ define <4 x i64> @vsaddu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1138,7 +1290,9 @@ define <4 x i64> @vsaddu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1148,7 +1302,9 @@ define <4 x i64> @vsaddu_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1158,7 +1314,11 @@ define <4 x i64> @vsaddu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1180,7 +1340,9 @@ define <8 x i64> @vsaddu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1232,7 +1394,9 @@ define <8 x i64> @vsaddu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1242,7 +1406,9 @@ define <8 x i64> @vsaddu_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1252,7 +1418,11 @@ define <8 x i64> @vsaddu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1274,7 +1444,9 @@ define <16 x i64> @vsaddu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1326,7 +1498,9 @@ define <16 x i64> @vsaddu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1336,7 +1510,9 @@ define <16 x i64> @vsaddu_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1346,7 +1522,11 @@ define <16 x i64> @vsaddu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1358,16 +1538,16 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
 ; RV32-LABEL: vsaddu_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1399,22 +1579,24 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vsaddu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vsaddu_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vsaddu.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1442,7 +1624,11 @@ define <32 x i64> @vsaddu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vsaddu.vi v16, v16, -1
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1473,7 +1659,9 @@ define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1502,6 +1690,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 2caa2ff41a7d93..32b8d10d87173d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -43,7 +43,9 @@ define <2 x i8> @vssub_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -67,7 +69,9 @@ define <2 x i8> @vssub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -78,7 +82,9 @@ define <2 x i8> @vssub_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -89,7 +95,11 @@ define <2 x i8> @vssub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -111,7 +121,9 @@ define <4 x i8> @vssub_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -149,7 +161,9 @@ define <4 x i8> @vssub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -160,7 +174,9 @@ define <4 x i8> @vssub_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -171,7 +187,11 @@ define <4 x i8> @vssub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -193,7 +213,9 @@ define <5 x i8> @vssub_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -217,7 +239,9 @@ define <5 x i8> @vssub_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -228,7 +252,9 @@ define <5 x i8> @vssub_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -239,7 +265,11 @@ define <5 x i8> @vssub_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -261,7 +291,9 @@ define <8 x i8> @vssub_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -285,7 +317,9 @@ define <8 x i8> @vssub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -296,7 +330,9 @@ define <8 x i8> @vssub_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -307,7 +343,11 @@ define <8 x i8> @vssub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -329,7 +369,9 @@ define <16 x i8> @vssub_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -353,7 +395,9 @@ define <16 x i8> @vssub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -364,7 +408,9 @@ define <16 x i8> @vssub_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -375,7 +421,11 @@ define <16 x i8> @vssub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -403,7 +453,9 @@ define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -426,7 +478,11 @@ define <256 x i8> @vssub_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v16, v16, a2
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -444,7 +500,9 @@ define <256 x i8> @vssub_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -462,7 +520,9 @@ define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -484,7 +544,9 @@ define <2 x i16> @vssub_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -508,7 +570,9 @@ define <2 x i16> @vssub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -519,7 +583,9 @@ define <2 x i16> @vssub_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -530,7 +596,11 @@ define <2 x i16> @vssub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -552,7 +622,9 @@ define <4 x i16> @vssub_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -576,7 +648,9 @@ define <4 x i16> @vssub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -587,7 +661,9 @@ define <4 x i16> @vssub_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -598,7 +674,11 @@ define <4 x i16> @vssub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -620,7 +700,9 @@ define <8 x i16> @vssub_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -644,7 +726,9 @@ define <8 x i16> @vssub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -655,7 +739,9 @@ define <8 x i16> @vssub_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -666,7 +752,11 @@ define <8 x i16> @vssub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -688,7 +778,9 @@ define <16 x i16> @vssub_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -712,7 +804,9 @@ define <16 x i16> @vssub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -723,7 +817,9 @@ define <16 x i16> @vssub_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -734,7 +830,11 @@ define <16 x i16> @vssub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -756,7 +856,9 @@ define <2 x i32> @vssub_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -780,7 +882,9 @@ define <2 x i32> @vssub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -791,7 +895,9 @@ define <2 x i32> @vssub_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -802,7 +908,11 @@ define <2 x i32> @vssub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -824,7 +934,9 @@ define <4 x i32> @vssub_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -848,7 +960,9 @@ define <4 x i32> @vssub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -859,7 +973,9 @@ define <4 x i32> @vssub_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -870,7 +986,11 @@ define <4 x i32> @vssub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -892,7 +1012,9 @@ define <8 x i32> @vssub_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -916,7 +1038,9 @@ define <8 x i32> @vssub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -927,7 +1051,9 @@ define <8 x i32> @vssub_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -938,7 +1064,11 @@ define <8 x i32> @vssub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -960,7 +1090,9 @@ define <16 x i32> @vssub_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -984,7 +1116,9 @@ define <16 x i32> @vssub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -995,7 +1129,9 @@ define <16 x i32> @vssub_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1006,7 +1142,11 @@ define <16 x i32> @vssub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1028,7 +1168,9 @@ define <2 x i64> @vssub_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1080,7 +1222,9 @@ define <2 x i64> @vssub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1091,7 +1235,9 @@ define <2 x i64> @vssub_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1102,7 +1248,11 @@ define <2 x i64> @vssub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1124,7 +1274,9 @@ define <4 x i64> @vssub_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1176,7 +1328,9 @@ define <4 x i64> @vssub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1187,7 +1341,9 @@ define <4 x i64> @vssub_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1198,7 +1354,11 @@ define <4 x i64> @vssub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1220,7 +1380,9 @@ define <8 x i64> @vssub_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1272,7 +1434,9 @@ define <8 x i64> @vssub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1283,7 +1447,9 @@ define <8 x i64> @vssub_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1294,7 +1460,11 @@ define <8 x i64> @vssub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1316,7 +1486,9 @@ define <16 x i64> @vssub_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1368,7 +1540,9 @@ define <16 x i64> @vssub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1379,7 +1553,9 @@ define <16 x i64> @vssub_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1390,7 +1566,11 @@ define <16 x i64> @vssub_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1402,16 +1582,16 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV32-LABEL: vssub_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1444,22 +1624,24 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssub.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vssub_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vssub_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vssub.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1488,7 +1670,11 @@ define <32 x i64> @vssub_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vssub.vx v16, v16, a2
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1520,7 +1706,9 @@ define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1550,6 +1738,8 @@ define <32 x i64> @vssub_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 6313f31bc1a615..60c16ef543a0ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -38,7 +38,9 @@ define <2 x i8> @vssubu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -62,7 +64,9 @@ define <2 x i8> @vssubu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -73,7 +77,9 @@ define <2 x i8> @vssubu_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -84,7 +90,11 @@ define <2 x i8> @vssubu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
 
@@ -106,7 +116,9 @@ define <4 x i8> @vssubu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -144,7 +156,9 @@ define <4 x i8> @vssubu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -155,7 +169,9 @@ define <4 x i8> @vssubu_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -166,7 +182,11 @@ define <4 x i8> @vssubu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
@@ -188,7 +208,9 @@ define <5 x i8> @vssubu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -212,7 +234,9 @@ define <5 x i8> @vssubu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
-  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -223,7 +247,9 @@ define <5 x i8> @vssubu_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> %m, i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -234,7 +260,11 @@ define <5 x i8> @vssubu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> splat (i8 -1), <5 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+  %head = insertelement <5 x i1> poison, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
   ret <5 x i8> %v
 }
 
@@ -256,7 +286,9 @@ define <8 x i8> @vssubu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -280,7 +312,9 @@ define <8 x i8> @vssubu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -291,7 +325,9 @@ define <8 x i8> @vssubu_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -302,7 +338,11 @@ define <8 x i8> @vssubu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
 
@@ -324,7 +364,9 @@ define <16 x i8> @vssubu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -348,7 +390,9 @@ define <16 x i8> @vssubu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
   %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -359,7 +403,9 @@ define <16 x i8> @vssubu_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -370,7 +416,11 @@ define <16 x i8> @vssubu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
 
@@ -398,7 +448,9 @@ define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -421,7 +473,11 @@ define <256 x i8> @vssubu_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v16, v16, a2
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %head = insertelement <256 x i1> poison, i1 true, i32 0
+  %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
   ret <256 x i8> %v
 }
 
@@ -439,7 +495,9 @@ define <256 x i8> @vssubu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
   ret <256 x i8> %v
 }
 
@@ -457,7 +515,9 @@ define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
+  %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+  %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+  %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
   ret <256 x i8> %v
 }
 
@@ -479,7 +539,9 @@ define <2 x i16> @vssubu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -503,7 +565,9 @@ define <2 x i16> @vssubu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -514,7 +578,9 @@ define <2 x i16> @vssubu_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -525,7 +591,11 @@ define <2 x i16> @vssubu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
 
@@ -547,7 +617,9 @@ define <4 x i16> @vssubu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -571,7 +643,9 @@ define <4 x i16> @vssubu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -582,7 +656,9 @@ define <4 x i16> @vssubu_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -593,7 +669,11 @@ define <4 x i16> @vssubu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
@@ -615,7 +695,9 @@ define <8 x i16> @vssubu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -639,7 +721,9 @@ define <8 x i16> @vssubu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -650,7 +734,9 @@ define <8 x i16> @vssubu_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -661,7 +747,11 @@ define <8 x i16> @vssubu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
 
@@ -683,7 +773,9 @@ define <16 x i16> @vssubu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -707,7 +799,9 @@ define <16 x i16> @vssubu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
   %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -718,7 +812,9 @@ define <16 x i16> @vssubu_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -729,7 +825,11 @@ define <16 x i16> @vssubu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
 
@@ -751,7 +851,9 @@ define <2 x i32> @vssubu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -775,7 +877,9 @@ define <2 x i32> @vssubu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -786,7 +890,9 @@ define <2 x i32> @vssubu_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -797,7 +903,11 @@ define <2 x i32> @vssubu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
 
@@ -819,7 +929,9 @@ define <4 x i32> @vssubu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -843,7 +955,9 @@ define <4 x i32> @vssubu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -854,7 +968,9 @@ define <4 x i32> @vssubu_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -865,7 +981,11 @@ define <4 x i32> @vssubu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
@@ -887,7 +1007,9 @@ define <8 x i32> @vssubu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -911,7 +1033,9 @@ define <8 x i32> @vssubu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %e
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -922,7 +1046,9 @@ define <8 x i32> @vssubu_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -933,7 +1059,11 @@ define <8 x i32> @vssubu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
 
@@ -955,7 +1085,9 @@ define <16 x i32> @vssubu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -979,7 +1111,9 @@ define <16 x i32> @vssubu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
   %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -990,7 +1124,9 @@ define <16 x i32> @vssubu_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1001,7 +1137,11 @@ define <16 x i32> @vssubu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
 
@@ -1023,7 +1163,9 @@ define <2 x i64> @vssubu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1075,7 +1217,9 @@ define <2 x i64> @vssubu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
-  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1086,7 +1230,9 @@ define <2 x i64> @vssubu_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1097,7 +1243,11 @@ define <2 x i64> @vssubu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
 
@@ -1119,7 +1269,9 @@ define <4 x i64> @vssubu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1171,7 +1323,9 @@ define <4 x i64> @vssubu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
-  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1182,7 +1336,9 @@ define <4 x i64> @vssubu_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1193,7 +1349,11 @@ define <4 x i64> @vssubu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
@@ -1215,7 +1375,9 @@ define <8 x i64> @vssubu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1267,7 +1429,9 @@ define <8 x i64> @vssubu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %e
 ; RV64-NEXT:    ret
   %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
-  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1278,7 +1442,9 @@ define <8 x i64> @vssubu_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1289,7 +1455,11 @@ define <8 x i64> @vssubu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
 
@@ -1311,7 +1481,9 @@ define <16 x i64> @vssubu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1363,7 +1535,9 @@ define <16 x i64> @vssubu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
 ; RV64-NEXT:    ret
   %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
   %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
-  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1374,7 +1548,9 @@ define <16 x i64> @vssubu_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1385,7 +1561,11 @@ define <16 x i64> @vssubu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1
 ; CHECK-NEXT:    ret
-  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
 
@@ -1397,16 +1577,16 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
 ; RV32-LABEL: vssubu_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    li a2, 16
 ; RV32-NEXT:    vslidedown.vi v7, v0, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB108_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB108_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v24, v0.t
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1439,22 +1619,24 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssubu.vx v16, v16, a2, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
 define <32 x i64> @vssubu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vssubu_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:    bltu a0, a2, .LBB109_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:  .LBB109_2:
-; RV32-NEXT:    li a2, 32
-; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vssubu.vv v8, v8, v24
 ; RV32-NEXT:    addi a1, a0, -16
@@ -1483,7 +1665,11 @@ define <32 x i64> @vssubu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vssubu.vx v16, v16, a2
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> splat (i1 true), i32 %evl)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
 
@@ -1515,7 +1701,9 @@ define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
   ret <32 x i64> %v
 }
 
@@ -1545,6 +1733,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV64-NEXT:    vmv1r.v v0, v24
 ; RV64-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; RV64-NEXT:    ret
-  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
+  %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+  %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index b78b8663eac90b..7774e3e4775af8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -76,7 +76,9 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.maximum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.maximum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -148,7 +150,9 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.maximum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.maximum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -222,7 +226,9 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.maximum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.maximum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -298,7 +304,9 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.maximum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.maximum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -400,7 +408,9 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.maximum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -616,6 +626,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 5
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -623,8 +635,6 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v7, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -705,7 +715,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.maximum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.maximum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -739,7 +751,9 @@ define <vscale x 1 x float> @vfmax_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.maximum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.maximum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -773,7 +787,9 @@ define <vscale x 2 x float> @vfmax_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.maximum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.maximum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -809,7 +825,9 @@ define <vscale x 4 x float> @vfmax_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.maximum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.maximum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -845,7 +863,9 @@ define <vscale x 8 x float> @vfmax_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.maximum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.maximum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -879,7 +899,9 @@ define <vscale x 1 x double> @vfmax_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.maximum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.maximum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -915,7 +937,9 @@ define <vscale x 2 x double> @vfmax_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.maximum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.maximum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -951,7 +975,9 @@ define <vscale x 4 x double> @vfmax_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.maximum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.maximum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1000,7 +1026,9 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v24
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.maximum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.maximum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1210,6 +1238,8 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.maximum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.maximum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 69c76152910e86..4e98d0581f896d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -76,7 +76,9 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.minimum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -148,7 +150,9 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.minimum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -222,7 +226,9 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.minimum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -298,7 +304,9 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.minimum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -400,7 +408,9 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.minimum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -616,6 +626,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 5
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -623,8 +635,6 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v7, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -705,7 +715,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.minimum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -739,7 +751,9 @@ define <vscale x 1 x float> @vfmin_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.minimum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -773,7 +787,9 @@ define <vscale x 2 x float> @vfmin_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.minimum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -809,7 +825,9 @@ define <vscale x 4 x float> @vfmin_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.minimum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -845,7 +863,9 @@ define <vscale x 8 x float> @vfmin_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.minimum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -879,7 +899,9 @@ define <vscale x 1 x double> @vfmin_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v11
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.minimum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -915,7 +937,9 @@ define <vscale x 2 x double> @vfmin_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v14
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.minimum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -951,7 +975,9 @@ define <vscale x 4 x double> @vfmin_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v20
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.minimum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1000,7 +1026,9 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v24
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.minimum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1210,6 +1238,8 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.minimum.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x double> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 8bc2334282653f..126836cd9390b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -86,7 +86,9 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16_unmasked(<vscale x 1 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.nearbyint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.nearbyint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -168,7 +170,9 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16_unmasked(<vscale x 2 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.nearbyint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.nearbyint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -252,7 +256,9 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16_unmasked(<vscale x 4 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.nearbyint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.nearbyint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -338,7 +344,9 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16_unmasked(<vscale x 8 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -424,7 +432,9 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16_unmasked(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -544,6 +554,8 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -551,8 +563,6 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -599,7 +609,9 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -641,7 +653,9 @@ define <vscale x 1 x float> @vp_nearbyint_nxv1f32_unmasked(<vscale x 1 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.nearbyint.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.nearbyint.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -683,7 +697,9 @@ define <vscale x 2 x float> @vp_nearbyint_nxv2f32_unmasked(<vscale x 2 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.nearbyint.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.nearbyint.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -727,7 +743,9 @@ define <vscale x 4 x float> @vp_nearbyint_nxv4f32_unmasked(<vscale x 4 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.nearbyint.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.nearbyint.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -771,7 +789,9 @@ define <vscale x 8 x float> @vp_nearbyint_nxv8f32_unmasked(<vscale x 8 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.nearbyint.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.nearbyint.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -815,7 +835,9 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32_unmasked(<vscale x 16 x floa
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.nearbyint.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.nearbyint.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -857,7 +879,9 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.nearbyint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.nearbyint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -901,7 +925,9 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -945,7 +971,9 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -989,7 +1017,9 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1033,7 +1063,9 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1157,6 +1189,8 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64_unmasked(<vscale x 16 x dou
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index f934127f978dc9..04761d4e7bfc4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -78,7 +78,9 @@ define <vscale x 1 x half> @vp_rint_nxv1f16_unmasked(<vscale x 1 x half> %va, i3
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -152,7 +154,9 @@ define <vscale x 2 x half> @vp_rint_nxv2f16_unmasked(<vscale x 2 x half> %va, i3
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -228,7 +232,9 @@ define <vscale x 4 x half> @vp_rint_nxv4f16_unmasked(<vscale x 4 x half> %va, i3
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -306,7 +312,9 @@ define <vscale x 8 x half> @vp_rint_nxv8f16_unmasked(<vscale x 8 x half> %va, i3
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -384,7 +392,9 @@ define <vscale x 16 x half> @vp_rint_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -497,6 +507,8 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -504,8 +516,6 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -548,7 +558,9 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -586,7 +598,9 @@ define <vscale x 1 x float> @vp_rint_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.rint.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.rint.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -624,7 +638,9 @@ define <vscale x 2 x float> @vp_rint_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.rint.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.rint.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -664,7 +680,9 @@ define <vscale x 4 x float> @vp_rint_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.rint.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.rint.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -704,7 +722,9 @@ define <vscale x 8 x float> @vp_rint_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.rint.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.rint.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -744,7 +764,9 @@ define <vscale x 16 x float> @vp_rint_nxv16f32_unmasked(<vscale x 16 x float> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.rint.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.rint.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -782,7 +804,9 @@ define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.rint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.rint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -822,7 +846,9 @@ define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -862,7 +888,9 @@ define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -902,7 +930,9 @@ define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -942,7 +972,9 @@ define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1047,6 +1079,8 @@ define <vscale x 16 x double> @vp_rint_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index eb4994914fad9a..16bd665dd0de98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -86,7 +86,9 @@ define <vscale x 1 x half> @vp_round_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -168,7 +170,9 @@ define <vscale x 2 x half> @vp_round_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -252,7 +256,9 @@ define <vscale x 4 x half> @vp_round_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -338,7 +344,9 @@ define <vscale x 8 x half> @vp_round_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -424,7 +432,9 @@ define <vscale x 16 x half> @vp_round_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -545,6 +555,8 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -552,8 +564,6 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -600,7 +610,9 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -642,7 +654,9 @@ define <vscale x 1 x float> @vp_round_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.round.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.round.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -684,7 +698,9 @@ define <vscale x 2 x float> @vp_round_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.round.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.round.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -728,7 +744,9 @@ define <vscale x 4 x float> @vp_round_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.round.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.round.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -772,7 +790,9 @@ define <vscale x 8 x float> @vp_round_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.round.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.round.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -816,7 +836,9 @@ define <vscale x 16 x float> @vp_round_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.round.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.round.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -858,7 +880,9 @@ define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.round.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.round.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -902,7 +926,9 @@ define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -946,7 +972,9 @@ define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -990,7 +1018,9 @@ define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1034,7 +1064,9 @@ define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1147,6 +1179,8 @@ define <vscale x 16 x double> @vp_round_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index f366a2922d079e..429ddb6c71be34 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -86,7 +86,9 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16_unmasked(<vscale x 1 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -168,7 +170,9 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16_unmasked(<vscale x 2 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -252,7 +256,9 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16_unmasked(<vscale x 4 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -338,7 +344,9 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16_unmasked(<vscale x 8 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -424,7 +432,9 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16_unmasked(<vscale x 16 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -545,6 +555,8 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -552,8 +564,6 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -600,7 +610,9 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -642,7 +654,9 @@ define <vscale x 1 x float> @vp_roundeven_nxv1f32_unmasked(<vscale x 1 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.roundeven.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.roundeven.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -684,7 +698,9 @@ define <vscale x 2 x float> @vp_roundeven_nxv2f32_unmasked(<vscale x 2 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.roundeven.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.roundeven.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -728,7 +744,9 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32_unmasked(<vscale x 4 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.roundeven.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -772,7 +790,9 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32_unmasked(<vscale x 8 x float>
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.roundeven.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -816,7 +836,9 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32_unmasked(<vscale x 16 x floa
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.roundeven.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -858,7 +880,9 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -902,7 +926,9 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -946,7 +972,9 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -990,7 +1018,9 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1034,7 +1064,9 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1147,6 +1179,8 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64_unmasked(<vscale x 16 x dou
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 79c940bdf089a5..c854e0fb8a05d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -86,7 +86,9 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16_unmasked(<vscale x 1 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -168,7 +170,9 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16_unmasked(<vscale x 2 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -252,7 +256,9 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16_unmasked(<vscale x 4 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -338,7 +344,9 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16_unmasked(<vscale x 8 x half>
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -424,7 +432,9 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16_unmasked(<vscale x 16 x hal
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -545,6 +555,8 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -552,8 +564,6 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v16, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -600,7 +610,9 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -642,7 +654,9 @@ define <vscale x 1 x float> @vp_roundtozero_nxv1f32_unmasked(<vscale x 1 x float
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.roundtozero.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.roundtozero.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -684,7 +698,9 @@ define <vscale x 2 x float> @vp_roundtozero_nxv2f32_unmasked(<vscale x 2 x float
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.roundtozero.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.roundtozero.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -728,7 +744,9 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32_unmasked(<vscale x 4 x float
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.roundtozero.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.roundtozero.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -772,7 +790,9 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32_unmasked(<vscale x 8 x float
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.roundtozero.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.roundtozero.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -816,7 +836,9 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32_unmasked(<vscale x 16 x fl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.roundtozero.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.roundtozero.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -858,7 +880,9 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.roundtozero.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.roundtozero.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -902,7 +926,9 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -946,7 +972,9 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -990,7 +1018,9 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -1034,7 +1064,9 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x doub
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1147,6 +1179,8 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64_unmasked(<vscale x 16 x d
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
index ea428bb4771718..6b584cfb22a52e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
@@ -206,7 +206,9 @@ define <vscale x 1 x i64> @straightline_offset_shl(ptr %p) {
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
   %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-  %offset = shl <vscale x 1 x i64> %step, splat (i64 3)
+  %splat.insert = insertelement <vscale x 1 x i64> poison, i64 3, i64 0
+  %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %offset = shl <vscale x 1 x i64> %step, %splat
   %ptrs = getelementptr i32, ptr %p, <vscale x 1 x i64> %offset
   %x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
   <vscale x 1 x ptr> %ptrs,
@@ -220,13 +222,17 @@ define <vscale x 1 x i64> @straightline_offset_shl(ptr %p) {
 define <vscale x 1 x i64> @neg_shl_is_not_commutative(ptr %p) {
 ; CHECK-LABEL: @neg_shl_is_not_commutative(
 ; CHECK-NEXT:    [[STEP:%.*]] = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-; CHECK-NEXT:    [[OFFSET:%.*]] = shl <vscale x 1 x i64> shufflevector (<vscale x 1 x i64> insertelement (<vscale x 1 x i64> poison, i64 3, i64 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), [[STEP]]
+; CHECK-NEXT:    [[SPLAT_INSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 3, i64 0
+; CHECK-NEXT:    [[SPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[SPLAT_INSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT:    [[OFFSET:%.*]] = shl <vscale x 1 x i64> [[SPLAT]], [[STEP]]
 ; CHECK-NEXT:    [[PTRS:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 1 x i64> [[OFFSET]]
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr> [[PTRS]], i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> poison)
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
   %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-  %offset = shl <vscale x 1 x i64> splat (i64 3), %step
+  %splat.insert = insertelement <vscale x 1 x i64> poison, i64 3, i64 0
+  %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %offset = shl <vscale x 1 x i64> %splat, %step
   %ptrs = getelementptr i32, ptr %p, <vscale x 1 x i64> %offset
   %x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
   <vscale x 1 x ptr> %ptrs,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
index 939a45e15c1037..4de71b6ce06fbb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -48,7 +48,9 @@ define <vscale x 1 x half> @vfsgnj_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -92,7 +94,9 @@ define <vscale x 2 x half> @vfsgnj_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -136,7 +140,9 @@ define <vscale x 4 x half> @vfsgnj_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -180,7 +186,9 @@ define <vscale x 8 x half> @vfsgnj_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -224,7 +232,9 @@ define <vscale x 16 x half> @vfsgnj_vv_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -301,6 +311,8 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -308,8 +320,6 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -338,7 +348,9 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -360,7 +372,9 @@ define <vscale x 1 x float> @vfsgnj_vv_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -382,7 +396,9 @@ define <vscale x 2 x float> @vfsgnj_vv_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -404,7 +420,9 @@ define <vscale x 4 x float> @vfsgnj_vv_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -426,7 +444,9 @@ define <vscale x 8 x float> @vfsgnj_vv_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -448,7 +468,9 @@ define <vscale x 16 x float> @vfsgnj_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -470,7 +492,9 @@ define <vscale x 1 x double> @vfsgnj_vv_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -492,7 +516,9 @@ define <vscale x 2 x double> @vfsgnj_vv_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -514,7 +540,9 @@ define <vscale x 4 x double> @vfsgnj_vv_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -536,6 +564,8 @@ define <vscale x 8 x double> @vfsgnj_vv_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
index df2bc523cd7a83..22f0d8bba2e629 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -46,7 +46,9 @@ define <vscale x 1 x half> @vfabs_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -88,7 +90,9 @@ define <vscale x 2 x half> @vfabs_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -130,7 +134,9 @@ define <vscale x 4 x half> @vfabs_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -172,7 +178,9 @@ define <vscale x 8 x half> @vfabs_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -214,7 +222,9 @@ define <vscale x 16 x half> @vfabs_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -269,6 +279,8 @@ define <vscale x 32 x half> @vfabs_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -276,8 +288,6 @@ define <vscale x 32 x half> @vfabs_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -296,7 +306,9 @@ define <vscale x 32 x half> @vfabs_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -318,7 +330,9 @@ define <vscale x 1 x float> @vfabs_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -340,7 +354,9 @@ define <vscale x 2 x float> @vfabs_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -362,7 +378,9 @@ define <vscale x 4 x float> @vfabs_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -384,7 +402,9 @@ define <vscale x 8 x float> @vfabs_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -406,7 +426,9 @@ define <vscale x 16 x float> @vfabs_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -428,7 +450,9 @@ define <vscale x 1 x double> @vfabs_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -450,7 +474,9 @@ define <vscale x 2 x double> @vfabs_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -472,7 +498,9 @@ define <vscale x 4 x double> @vfabs_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -494,7 +522,9 @@ define <vscale x 7 x double> @vfabs_vv_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -516,7 +546,9 @@ define <vscale x 8 x double> @vfabs_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -566,6 +598,8 @@ define <vscale x 16 x double> @vfabs_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index c69a7bc5cece47..4168f5cd507911 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -48,7 +48,9 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -129,7 +131,9 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -156,7 +160,9 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x half
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -200,7 +206,9 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -254,7 +262,9 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -298,7 +308,9 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -352,7 +364,9 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -396,7 +410,9 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -450,7 +466,9 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -494,7 +512,9 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -548,7 +568,9 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -625,6 +647,8 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -632,8 +656,6 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -662,7 +684,9 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -728,6 +752,8 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -735,8 +761,6 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -758,7 +782,9 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -780,7 +806,9 @@ define <vscale x 1 x float> @vfadd_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -804,7 +832,9 @@ define <vscale x 1 x float> @vfadd_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -826,7 +856,9 @@ define <vscale x 2 x float> @vfadd_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -850,7 +882,9 @@ define <vscale x 2 x float> @vfadd_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -872,7 +906,9 @@ define <vscale x 4 x float> @vfadd_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -896,7 +932,9 @@ define <vscale x 4 x float> @vfadd_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -918,7 +956,9 @@ define <vscale x 8 x float> @vfadd_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -942,7 +982,9 @@ define <vscale x 8 x float> @vfadd_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -964,7 +1006,9 @@ define <vscale x 16 x float> @vfadd_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -988,7 +1032,9 @@ define <vscale x 16 x float> @vfadd_vf_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -1010,7 +1056,9 @@ define <vscale x 1 x double> @vfadd_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1034,7 +1082,9 @@ define <vscale x 1 x double> @vfadd_vf_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1056,7 +1106,9 @@ define <vscale x 2 x double> @vfadd_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1080,7 +1132,9 @@ define <vscale x 2 x double> @vfadd_vf_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1102,7 +1156,9 @@ define <vscale x 4 x double> @vfadd_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1126,7 +1182,9 @@ define <vscale x 4 x double> @vfadd_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1160,7 +1218,9 @@ define <vscale x 8 x double> @vfadd_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1184,6 +1244,8 @@ define <vscale x 8 x double> @vfadd_vf_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index 3ad17e85570a20..396e99bc5e4f5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -48,7 +48,9 @@ define <vscale x 1 x half> @vfdiv_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -102,7 +104,9 @@ define <vscale x 1 x half> @vfdiv_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -146,7 +150,9 @@ define <vscale x 2 x half> @vfdiv_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -200,7 +206,9 @@ define <vscale x 2 x half> @vfdiv_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -244,7 +252,9 @@ define <vscale x 4 x half> @vfdiv_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -298,7 +308,9 @@ define <vscale x 4 x half> @vfdiv_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -342,7 +354,9 @@ define <vscale x 8 x half> @vfdiv_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -396,7 +410,9 @@ define <vscale x 8 x half> @vfdiv_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -440,7 +456,9 @@ define <vscale x 16 x half> @vfdiv_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -494,7 +512,9 @@ define <vscale x 16 x half> @vfdiv_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -571,6 +591,8 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -578,8 +600,6 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -608,7 +628,9 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -674,6 +696,8 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -681,8 +705,6 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -704,7 +726,9 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -726,7 +750,9 @@ define <vscale x 1 x float> @vfdiv_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -750,7 +776,9 @@ define <vscale x 1 x float> @vfdiv_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -772,7 +800,9 @@ define <vscale x 2 x float> @vfdiv_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -796,7 +826,9 @@ define <vscale x 2 x float> @vfdiv_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -818,7 +850,9 @@ define <vscale x 4 x float> @vfdiv_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -842,7 +876,9 @@ define <vscale x 4 x float> @vfdiv_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -864,7 +900,9 @@ define <vscale x 8 x float> @vfdiv_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -888,7 +926,9 @@ define <vscale x 8 x float> @vfdiv_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -910,7 +950,9 @@ define <vscale x 16 x float> @vfdiv_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -934,7 +976,9 @@ define <vscale x 16 x float> @vfdiv_vf_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -956,7 +1000,9 @@ define <vscale x 1 x double> @vfdiv_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -980,7 +1026,9 @@ define <vscale x 1 x double> @vfdiv_vf_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1002,7 +1050,9 @@ define <vscale x 2 x double> @vfdiv_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1026,7 +1076,9 @@ define <vscale x 2 x double> @vfdiv_vf_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1048,7 +1100,9 @@ define <vscale x 4 x double> @vfdiv_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1072,7 +1126,9 @@ define <vscale x 4 x double> @vfdiv_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1106,7 +1162,9 @@ define <vscale x 8 x double> @vfdiv_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1130,6 +1188,8 @@ define <vscale x 8 x double> @vfdiv_vf_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index 7556b3ace5c6cb..72101d62567b4d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -48,7 +48,9 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.maxnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.maxnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -92,7 +94,9 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.maxnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.maxnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -136,7 +140,9 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.maxnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.maxnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -180,7 +186,9 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.maxnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.maxnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -224,7 +232,9 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.maxnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.maxnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -301,6 +311,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -308,8 +320,6 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -338,7 +348,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -360,7 +372,9 @@ define <vscale x 1 x float> @vfmax_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.maxnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.maxnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -382,7 +396,9 @@ define <vscale x 2 x float> @vfmax_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.maxnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.maxnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -404,7 +420,9 @@ define <vscale x 4 x float> @vfmax_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.maxnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.maxnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -426,7 +444,9 @@ define <vscale x 8 x float> @vfmax_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.maxnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.maxnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -448,7 +468,9 @@ define <vscale x 16 x float> @vfmax_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.maxnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.maxnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -470,7 +492,9 @@ define <vscale x 1 x double> @vfmax_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.maxnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.maxnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -492,7 +516,9 @@ define <vscale x 2 x double> @vfmax_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.maxnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.maxnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -514,7 +540,9 @@ define <vscale x 4 x double> @vfmax_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.maxnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.maxnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -536,6 +564,8 @@ define <vscale x 8 x double> @vfmax_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.maxnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.maxnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index 755c665376128a..15fa24a35b7d59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -48,7 +48,9 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.minnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.minnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -92,7 +94,9 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.minnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.minnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -136,7 +140,9 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.minnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.minnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -180,7 +186,9 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.minnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.minnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -224,7 +232,9 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.minnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.minnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -301,6 +311,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -308,8 +320,6 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -338,7 +348,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -360,7 +372,9 @@ define <vscale x 1 x float> @vfmin_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.minnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.minnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -382,7 +396,9 @@ define <vscale x 2 x float> @vfmin_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.minnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.minnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -404,7 +420,9 @@ define <vscale x 4 x float> @vfmin_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.minnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.minnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -426,7 +444,9 @@ define <vscale x 8 x float> @vfmin_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.minnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.minnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -448,7 +468,9 @@ define <vscale x 16 x float> @vfmin_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.minnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.minnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -470,7 +492,9 @@ define <vscale x 1 x double> @vfmin_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.minnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.minnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -492,7 +516,9 @@ define <vscale x 2 x double> @vfmin_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.minnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.minnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -514,7 +540,9 @@ define <vscale x 4 x double> @vfmin_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.minnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.minnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -536,6 +564,8 @@ define <vscale x 8 x double> @vfmin_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.minnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.minnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index 30d5919238cfa5..bb9d3cfed3001a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -48,7 +48,9 @@ define <vscale x 1 x half> @vfmul_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -102,7 +104,9 @@ define <vscale x 1 x half> @vfmul_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -146,7 +150,9 @@ define <vscale x 2 x half> @vfmul_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -200,7 +206,9 @@ define <vscale x 2 x half> @vfmul_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -244,7 +252,9 @@ define <vscale x 4 x half> @vfmul_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -298,7 +308,9 @@ define <vscale x 4 x half> @vfmul_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -342,7 +354,9 @@ define <vscale x 8 x half> @vfmul_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -396,7 +410,9 @@ define <vscale x 8 x half> @vfmul_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -440,7 +456,9 @@ define <vscale x 16 x half> @vfmul_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -494,7 +512,9 @@ define <vscale x 16 x half> @vfmul_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -571,6 +591,8 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -578,8 +600,6 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -608,7 +628,9 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -674,6 +696,8 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -681,8 +705,6 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -704,7 +726,9 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -726,7 +750,9 @@ define <vscale x 1 x float> @vfmul_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -750,7 +776,9 @@ define <vscale x 1 x float> @vfmul_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -772,7 +800,9 @@ define <vscale x 2 x float> @vfmul_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -796,7 +826,9 @@ define <vscale x 2 x float> @vfmul_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -818,7 +850,9 @@ define <vscale x 4 x float> @vfmul_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -842,7 +876,9 @@ define <vscale x 4 x float> @vfmul_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -864,7 +900,9 @@ define <vscale x 8 x float> @vfmul_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -888,7 +926,9 @@ define <vscale x 8 x float> @vfmul_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -910,7 +950,9 @@ define <vscale x 16 x float> @vfmul_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -934,7 +976,9 @@ define <vscale x 16 x float> @vfmul_vf_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -956,7 +1000,9 @@ define <vscale x 1 x double> @vfmul_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -980,7 +1026,9 @@ define <vscale x 1 x double> @vfmul_vf_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1002,7 +1050,9 @@ define <vscale x 2 x double> @vfmul_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1026,7 +1076,9 @@ define <vscale x 2 x double> @vfmul_vf_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1048,7 +1100,9 @@ define <vscale x 4 x double> @vfmul_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1072,7 +1126,9 @@ define <vscale x 4 x double> @vfmul_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1106,7 +1162,9 @@ define <vscale x 8 x double> @vfmul_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1130,6 +1188,8 @@ define <vscale x 8 x double> @vfmul_vf_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index 1db5fa1720a276..ef08865100f14b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -46,7 +46,9 @@ define <vscale x 1 x half> @vfneg_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -88,7 +90,9 @@ define <vscale x 2 x half> @vfneg_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -130,7 +134,9 @@ define <vscale x 4 x half> @vfneg_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -172,7 +178,9 @@ define <vscale x 8 x half> @vfneg_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -214,7 +222,9 @@ define <vscale x 16 x half> @vfneg_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -269,6 +279,8 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -276,8 +288,6 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -296,7 +306,9 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -318,7 +330,9 @@ define <vscale x 1 x float> @vfneg_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -340,7 +354,9 @@ define <vscale x 2 x float> @vfneg_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -362,7 +378,9 @@ define <vscale x 4 x float> @vfneg_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -384,7 +402,9 @@ define <vscale x 8 x float> @vfneg_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -406,7 +426,9 @@ define <vscale x 16 x float> @vfneg_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -428,7 +450,9 @@ define <vscale x 1 x double> @vfneg_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -450,7 +474,9 @@ define <vscale x 2 x double> @vfneg_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -472,7 +498,9 @@ define <vscale x 4 x double> @vfneg_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -494,7 +522,9 @@ define <vscale x 7 x double> @vfneg_vv_nxv7f64_unmasked(<vscale x 7 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -516,7 +546,9 @@ define <vscale x 8 x double> @vfneg_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -566,6 +598,8 @@ define <vscale x 16 x double> @vfneg_vv_nxv16f64_unmasked(<vscale x 16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index d6caad15e40a2c..b13d221e00e634 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -46,7 +46,9 @@ define <vscale x 1 x half> @vfsqrt_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.sqrt.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.sqrt.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -88,7 +90,9 @@ define <vscale x 2 x half> @vfsqrt_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.sqrt.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.sqrt.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -130,7 +134,9 @@ define <vscale x 4 x half> @vfsqrt_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.sqrt.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.sqrt.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -172,7 +178,9 @@ define <vscale x 8 x half> @vfsqrt_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.sqrt.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.sqrt.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -214,7 +222,9 @@ define <vscale x 16 x half> @vfsqrt_vv_nxv16f16_unmasked(<vscale x 16 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.sqrt.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.sqrt.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -269,6 +279,8 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -276,8 +288,6 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -296,7 +306,9 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.sqrt.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.sqrt.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -318,7 +330,9 @@ define <vscale x 1 x float> @vfsqrt_vv_nxv1f32_unmasked(<vscale x 1 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.sqrt.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.sqrt.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -340,7 +354,9 @@ define <vscale x 2 x float> @vfsqrt_vv_nxv2f32_unmasked(<vscale x 2 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.sqrt.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.sqrt.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -362,7 +378,9 @@ define <vscale x 4 x float> @vfsqrt_vv_nxv4f32_unmasked(<vscale x 4 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.sqrt.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.sqrt.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -384,7 +402,9 @@ define <vscale x 8 x float> @vfsqrt_vv_nxv8f32_unmasked(<vscale x 8 x float> %va
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.sqrt.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.sqrt.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -406,7 +426,9 @@ define <vscale x 16 x float> @vfsqrt_vv_nxv16f32_unmasked(<vscale x 16 x float>
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.sqrt.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.sqrt.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -428,7 +450,9 @@ define <vscale x 1 x double> @vfsqrt_vv_nxv1f64_unmasked(<vscale x 1 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.sqrt.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.sqrt.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -450,7 +474,9 @@ define <vscale x 2 x double> @vfsqrt_vv_nxv2f64_unmasked(<vscale x 2 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.sqrt.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.sqrt.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -472,7 +498,9 @@ define <vscale x 4 x double> @vfsqrt_vv_nxv4f64_unmasked(<vscale x 4 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.sqrt.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.sqrt.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -494,7 +522,9 @@ define <vscale x 7 x double> @vfsqrt_vv_nxv7f64_unmasked(<vscale x 7 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 7 x double> @llvm.vp.sqrt.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x double> @llvm.vp.sqrt.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x double> %v
 }
 
@@ -516,7 +546,9 @@ define <vscale x 8 x double> @vfsqrt_vv_nxv8f64_unmasked(<vscale x 8 x double> %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.sqrt.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.sqrt.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -566,6 +598,8 @@ define <vscale x 16 x double> @vfsqrt_vv_nxv16f64_unmasked(<vscale x 16 x double
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index 2eae18d7cc4937..010b133e51b1f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -48,7 +48,9 @@ define <vscale x 1 x half> @vfsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -102,7 +104,9 @@ define <vscale x 1 x half> @vfsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
 }
 
@@ -146,7 +150,9 @@ define <vscale x 2 x half> @vfsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -200,7 +206,9 @@ define <vscale x 2 x half> @vfsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
 }
 
@@ -244,7 +252,9 @@ define <vscale x 4 x half> @vfsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -298,7 +308,9 @@ define <vscale x 4 x half> @vfsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
 }
 
@@ -342,7 +354,9 @@ define <vscale x 8 x half> @vfsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -396,7 +410,9 @@ define <vscale x 8 x half> @vfsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
 }
 
@@ -440,7 +456,9 @@ define <vscale x 16 x half> @vfsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -494,7 +512,9 @@ define <vscale x 16 x half> @vfsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
 }
 
@@ -571,6 +591,8 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    slli a1, a1, 3
 ; ZVFHMIN-NEXT:    sub sp, sp, a1
 ; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -578,8 +600,6 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v24
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -608,7 +628,9 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    add sp, sp, a0
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    ret
-  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -674,6 +696,8 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vfmv.v.f v16, fa5
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v4, v16
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    csrr a2, vlenb
 ; ZVFHMIN-NEXT:    slli a1, a2, 1
 ; ZVFHMIN-NEXT:    sub a3, a0, a1
@@ -681,8 +705,6 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    addi a4, a4, -1
 ; ZVFHMIN-NEXT:    and a3, a4, a3
 ; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
 ; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
@@ -704,7 +726,9 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
-  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
 }
 
@@ -726,7 +750,9 @@ define <vscale x 1 x float> @vfsub_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -750,7 +776,9 @@ define <vscale x 1 x float> @vfsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %v
 }
 
@@ -772,7 +800,9 @@ define <vscale x 2 x float> @vfsub_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -796,7 +826,9 @@ define <vscale x 2 x float> @vfsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %v
 }
 
@@ -818,7 +850,9 @@ define <vscale x 4 x float> @vfsub_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -842,7 +876,9 @@ define <vscale x 4 x float> @vfsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %v
 }
 
@@ -864,7 +900,9 @@ define <vscale x 8 x float> @vfsub_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -888,7 +926,9 @@ define <vscale x 8 x float> @vfsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %va,
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %v
 }
 
@@ -910,7 +950,9 @@ define <vscale x 16 x float> @vfsub_vv_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -934,7 +976,9 @@ define <vscale x 16 x float> @vfsub_vf_nxv16f32_unmasked(<vscale x 16 x float> %
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
   %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
-  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x float> %v
 }
 
@@ -956,7 +1000,9 @@ define <vscale x 1 x double> @vfsub_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -980,7 +1026,9 @@ define <vscale x 1 x double> @vfsub_vf_nxv1f64_unmasked(<vscale x 1 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
-  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %v
 }
 
@@ -1002,7 +1050,9 @@ define <vscale x 2 x double> @vfsub_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1026,7 +1076,9 @@ define <vscale x 2 x double> @vfsub_vf_nxv2f64_unmasked(<vscale x 2 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
-  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %v
 }
 
@@ -1048,7 +1100,9 @@ define <vscale x 4 x double> @vfsub_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1072,7 +1126,9 @@ define <vscale x 4 x double> @vfsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
-  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %v
 }
 
@@ -1106,7 +1162,9 @@ define <vscale x 8 x double> @vfsub_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }
 
@@ -1130,6 +1188,8 @@ define <vscale x 8 x double> @vfsub_vf_nxv8f64_unmasked(<vscale x 8 x double> %v
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
-  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %v
 }


